{
"source": "joshloyal/LadderNetwork",
"score": 3
}
#### File: LadderNetwork/ladder_network/nonlinearities.py
```python
import numpy as np
import tensorflow as tf
from ladder_network.wrappers import Registry
activations = Registry('Activation')
@activations.register('relu')
def relu(x, name=None):
return tf.nn.relu(x, name=name)
@activations.register('paired_relu')
def paired_relu(x, name='paired_relu'):
"""Paired ReLUs.
    Normal ReLUs suffer from the 'dying ReLU' problem, which occurs when
    large gradients update the weights in such a way that the unit can never
    activate on any datapoint again (gradients are zero for x < 0). On average,
    up to 40% of ReLU neurons can be dead in a network. The Paired ReLU
    is similar to the CReLU proposed by Shang et al., except here the sign
    of the output is preserved.
    The Paired ReLU calculates both max(x, 0) and min(x, 0) on the input
    so that there is always some path for gradients to flow. In this
    case the output tensor size is doubled and each path has its own weights.
"""
relu_pairs = [tf.nn.relu(x), -tf.nn.relu(-x)]
return skflow.ops.merge(relu_pairs, mode='concat')
@activations.register('leaky_relu')
def leaky_relu(x, alpha=0., max_value=None, name=''):
"""Leaky Rectified Linear Unit (ReLU)
Parameters
----------
x : Tensor
alpha : float
Slope of negative section.
max_value : float
Saturation threshold
name : str
A name for this activation op
"""
op_scope = skflow.tensor.get_scope(x)
with tf.name_scope(op_scope + name) as scope:
negative_part = tf.nn.relu(-x)
x = tf.nn.relu(x)
if max_value is not None:
x = tf.clip_by_value(
x, tf.cast(0., dtype=tf.float32), tf.cast(max_value, dtype=tf.float32))
if isinstance(alpha, (tuple, list, np.ndarray)) or np.isscalar(alpha):
alpha = tf.constant(alpha, dtype=tf.float32)
x -= alpha * negative_part
return x
@activations.register('prelu')
def prelu(x, alphas_init=0.25, name='prelu'):
"""PReLU.
    Parametric Rectified Linear Unit
Parameters
----------
x : Tensor
alphas_init : float
Value to initialize coefficients
(the default is 0.25, which is used in the original paper).
name : str
Name for the op scope.
References
----------
.. [1] He, et al.
"Delving Deep into Rectifiers: Surpassing Human-Level Performance on
ImageNet Classification."
<http://arxiv.org/pdf/1502.01852v1.pdf>
"""
a_shape = skflow.tensor.get_shape(x)[1:]
op_scope = skflow.tensor.get_scope(x)
with tf.variable_op_scope([x], op_scope + name, 'prelu') as scope:
a_init = tf.constant_initializer(alphas_init)
alphas = skflow.tensor.variable('alphas',
shape=a_shape,
initializer=a_init)
x = tf.nn.relu(x) + tf.mul(alphas, (x - tf.abs(x))) * 0.5
# save the alphas in the tensor to make it easy to grab later
x.alphas = alphas
return x
@activations.register('elu')
def elu(x, name=None):
return tf.nn.elu(x, name=name)
@activations.register('sigmoid')
def sigmoid(x, name=None):
return tf.nn.sigmoid(x, name=name)
@activations.register('tanh')
def tanh(x, name=None):
return tf.nn.tanh(x, name=name)
@activations.register('softplus')
def softplus(x, name=None):
return tf.nn.softplus(x, name=name)
@activations.register('identity')
def identity(x, name=None):
return tf.identity(x, name=name)
@activations.register('softmax')
def softmax(x, name=None):
return tf.nn.softmax(x, name=name)
```
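The Paired ReLU described above doubles the feature dimension by keeping both the positive and negative parts of the pre-activation. Below is a minimal NumPy sketch of that idea, for illustration only; it is not the TensorFlow op above, which relies on project-internal `skflow` helpers.
```python
import numpy as np

def paired_relu_np(x):
    # positive path keeps max(x, 0); negative path keeps min(x, 0).
    # Concatenating the two doubles the feature dimension, so some
    # gradient path is always active for every input.
    return np.concatenate([np.maximum(x, 0.0), np.minimum(x, 0.0)], axis=-1)

x = np.array([[-2.0, 0.5, 3.0]])
print(paired_relu_np(x))  # [[ 0.   0.5  3.  -2.   0.   0. ]]
```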
#### File: ladder_network/ops/batch_norm_ops.py
```python
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.layers as layers
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.training import moving_averages
import ladder_network.ops.training as training_ops
from ladder_network import tensor_utils
@slim.add_arg_scope
def scale_and_center(tensor_in,
scale=True,
center=True,
reuse=None,
variables_collections=None,
scope=None):
"""Applies the trainable batch-norm correction to a normalized tensor:
u = gamma * (u_pre + beta)
"""
with tf.variable_scope(scope, 'scale_and_offset', [tensor_in],
reuse=reuse) as sc:
tensor_in = tf.convert_to_tensor(tensor_in)
input_shape = tensor_utils.get_shape(tensor_in)
outputs = tensor_in
if center:
beta_collections = layers.utils.get_variable_collections(
variables_collections, "beta")
beta = slim.variable("beta",
shape=[input_shape[-1]],
initializer=tf.zeros_initializer,
collections=beta_collections,
trainable=True)
outputs = outputs + beta
if scale:
gamma_collections = layers.utils.get_variable_collections(
variables_collections, "gamma")
gamma = slim.variable("gamma",
shape=[input_shape[-1]],
initializer=tf.constant_initializer(1.),
collections=gamma_collections,
trainable=True)
outputs = gamma * outputs
return outputs
@slim.add_arg_scope
def batch_normalization(tensor_in,
epsilon=1e-10,
decay=0.9,
variables_collections=None,
outputs_collections=None,
reuse=None,
scope=None):
"""Element-wise batch normalization. This is only the first
half of the typical batch normalization calculation
(standardization by the batch mean and variance).
    u = (u_pre - mean) / sqrt(variance + epsilon)
"""
with tf.variable_scope(scope, 'batch_normalization', [tensor_in],
reuse=reuse) as sc:
tensor_in = tf.convert_to_tensor(tensor_in)
input_shape = tensor_in.get_shape().as_list()
input_ndim = len(input_shape)
axis = list(range(input_ndim - 1))
moving_mean_collections = layers.utils.get_variable_collections(
variables_collections, 'moving_mean')
moving_mean = slim.variable('moving_mean',
shape=input_shape[-1:],
initializer=tf.zeros_initializer,
collections=moving_mean_collections,
trainable=False)
moving_variance_collections = layers.utils.get_variable_collections(
variables_collections, "moving_variance")
moving_variance = slim.variable('moving_variance',
shape=input_shape[-1:],
initializer=tf.constant_initializer(1.),
collections=moving_variance_collections,
trainable=False)
def update_mean_var():
mean, variance = tf.nn.moments(tensor_in, axis, name='moments')
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay)
with tf.control_dependencies([update_moving_mean,
update_moving_variance]):
return tf.identity(mean), tf.identity(variance)
is_training = training_ops.get_training_mode()
mean, variance = control_flow_ops.cond(
is_training,
update_mean_var,
lambda: (moving_mean, moving_variance))
        layers.utils.collect_named_outputs(
            outputs_collections, sc.name + '/mean', mean)
        layers.utils.collect_named_outputs(
            outputs_collections, sc.name + '/variance', variance)
# actually apply the normalization
variance_epsilon = tensor_utils.to_tensor(epsilon, tensor_in.dtype.base_dtype)
return (tensor_in - mean) * tf.rsqrt(variance + variance_epsilon)
```
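The two ops above split standard batch normalization into a standardization step and a trainable correction, which the ladder network applies at different points. Below is a NumPy sketch of the combined arithmetic only; the TensorFlow ops additionally track moving averages for use at inference time.
```python
import numpy as np

def normalize(x, eps=1e-10):
    # standardize each feature by its batch mean and variance
    mean = x.mean(axis=0)
    var = x.var(axis=0)
    return (x - mean) / np.sqrt(var + eps)

def scale_and_center(u, gamma, beta):
    # trainable correction: gamma * (u + beta)
    return gamma * (u + beta)

rng = np.random.default_rng(0)
x = rng.normal(size=(32, 4))                      # batch of 32 examples, 4 features
u = normalize(x)                                  # batch_normalization
y = scale_and_center(u, np.ones(4), np.zeros(4))  # identity correction
print(u.mean(axis=0).round(6), u.std(axis=0).round(3))
```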
#### File: ladder_network/ops/training_ops.py
```python
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
def is_training(is_training=False, session=None):
"""is_training.
Set the graph training mode. This is meant to be used to control ops
that have different output at training and testing time, such as
dropout and batch normalization.
Parameters
----------
is_training : bool
        What the training mode of the current graph should be set to.
session : tf.Session (optional)
The session that owns the current computational graph.
Examples
--------
...
>>> training_mode = skflow.get_training_mode()
>>> my_conditional_op = tf.cond(training_mode, if_yes_op, if_no_op)
>>> skflow.is_training(True)
>>> session.run(my_conditional_op)
if_yes_op
>>> skflow.is_training(False)
>>> session.run(my_conditional_op)
if_no_op
...
Returns
-------
    None. The graph's `is_training` variable is updated in place.
"""
if not session:
session = tf.get_default_session()
with session.graph.as_default():
init_training_mode()
if is_training:
tf.get_collection('is_training_ops')[0].eval(session=session)
else:
tf.get_collection('is_training_ops')[1].eval(session=session)
def get_training_mode():
"""get_training_mode
Returns variable in-use to set training mode.
Returns
-------
A `tf.Variable`, the training mode holder.
"""
init_training_mode()
coll = tf.get_collection('is_training')
return coll[0]
def init_training_mode():
"""init_training_mode
Creates `is_training` variable and its ops if they haven't been created yet.
"""
coll = tf.get_collection('is_training')
if len(coll) == 0:
training_phase = tf.get_variable(
'is_training', dtype=tf.bool, shape=[],
initializer=tf.constant_initializer(False),
trainable=False)
tf.add_to_collection('is_training', training_phase)
set_training = tf.assign(training_phase, True)
set_inference = tf.assign(training_phase, False)
tf.add_to_collection('is_training_ops', set_training)
tf.add_to_collection('is_training_ops', set_inference)
def in_train_phase(x, alt):
is_training = get_training_mode()
return control_flow_ops.cond(
is_training,
lambda: x,
lambda: alt)
```
{
"source": "joshloyal/MarginalizedDenoisingAutoEncoder",
"score": 2
}
#### File: mda/tests/test_mda.py
```python
import os
import numpy as np
import pandas as pd
import pytest
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import mda
file_path = os.path.dirname(__file__)
fixture_path = os.path.join(file_path, 'weights.npy')
data_path = os.path.join(file_path, 'kin8nm_train.csv')
@pytest.mark.thisone
def test_mda_matlab():
df = pd.read_csv(data_path, header=None)
df.pop(8)
X = df.values
mDA = mda.MarginalizedDenoisingAutoencoder(noise_level=0.5)
h = mDA.fit_transform(X)
W_test = np.loadtxt(fixture_path)
#print(mDA.weights[0, :])
#print(mDA.biases[-1])
#print
#print(W_test[0, :])
def test_smda_matlab():
df = pd.read_csv(data_path, header=None)
df.pop(8)
X = df.values
mDA = mda.SMDAutoencoder(n_layers=4, noise_level=0.5)
h = mDA.fit_transform(X)
def test_mda_pipeline():
df = pd.read_csv(data_path, header=None)
y = df.pop(8).values
X = df.values
#mDA = mda.MarginalizedDenoisingAutoencoder(noise_level=0.5)
mDA = mda.SMDAutoencoder(n_layers=4, noise_level=0.5)
X_mda = mDA.fit_transform(X)
print('MDA features')
X_train, X_test, y_train, y_test = train_test_split(X_mda, y, test_size=0.2, random_state=2)
est = RandomForestRegressor(n_estimators=100, random_state=123)
est.fit(X_train, y_train)
print(mean_squared_error(y_test, est.predict(X_test)))
print('No MDA')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=2)
est = RandomForestRegressor(n_estimators=100, random_state=123)
est.fit(X_train, y_train)
print(mean_squared_error(y_test, est.predict(X_test)))
```
{
"source": "joshloyal/Mosaic",
"score": 3
}
#### File: mosaic/datasets/cifar10.py
```python
import glob
import os
import itertools
import urllib.request as url_request
import tarfile
import json
import pickle
import numpy as np
import pandas as pd
from sklearn.utils import check_random_state
from mosaic import image_io
from mosaic.datasets.base import get_data_home, ImageDataBundle
from mosaic.datasets.progress_bar import chunk_read
HERE = os.path.dirname(os.path.abspath(__file__))
URL = "http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
ARCHIVE_NAME = "cifar-10-batches-py.tar.gz"
DATA_NAME = "cifar-10-batches-py"
def download_cifar10_images(target_dir):
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
print(80 * '-')
print("Downloading cifar10 images to {}".format(archive_path))
opener = url_request.urlopen(URL)
with open(archive_path, 'wb') as f:
f.write(chunk_read(opener))
print(80 * '-')
print("Extracting cifar10 images to {}".format(target_dir))
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
def fetch_cifar10_images():
data_home = get_data_home()
cifar10_dir = os.path.join(data_home, DATA_NAME)
if not os.path.exists(cifar10_dir):
download_cifar10_images(data_home)
metadata_path = os.path.join(cifar10_dir, 'batches.meta')
with open(metadata_path, 'rb') as metadata_pkl:
metadata = pickle.load(metadata_pkl, encoding='bytes')
label_list = metadata[b'label_names']
n_train_samples = 50000
images = np.zeros((n_train_samples, 3, 32, 32), dtype='uint8')
labels = []
for i in range(1, 6):
batch_path = os.path.join(cifar10_dir, 'data_batch_' + str(i))
with open(batch_path, 'rb') as batch_pkl:
batch_data = pickle.load(batch_pkl, encoding='bytes')
batch_images = batch_data[b'data'].reshape(-1, 3, 32, 32)
batch_labels = [str(label_list[label], 'utf-8') for
label in batch_data[b'labels']]
images[(i - 1) * 10000: i * 10000, :, :, :] = batch_images
labels.extend(batch_labels)
    # convert to (n_samples, height, width, channels) order
images = images.transpose(0, 2, 3, 1)
labels = np.asarray(labels)
return images, labels
```
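A hedged usage sketch for the loader above, assuming the ~170 MB download into the mosaic data home succeeds; the printed labels are illustrative.
```python
from mosaic.datasets.cifar10 import fetch_cifar10_images

images, labels = fetch_cifar10_images()
print(images.shape, images.dtype)  # (50000, 32, 32, 3) uint8
print(labels[:3])                  # e.g. ['frog' 'truck' 'truck']
```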
#### File: mosaic/datasets/fashion.py
```python
import os
import urllib.request as url_request
import struct
import gzip
import numpy as np
from mosaic.datasets.base import get_data_home
from mosaic.datasets.progress_bar import chunk_read
HERE = os.path.dirname(os.path.abspath(__file__))
URL = "http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/"
DATA_NAME = "fashion"
TRAIN_DATA = dict(description='training set images',
filename='train-images-idx3-ubyte.gz')
TRAIN_LABELS = dict(description='training set labels',
filename='train-labels-idx1-ubyte.gz')
TEST_DATA = dict(description='test set images',
filename='t10k-images-idx3-ubyte.gz')
TEST_LABELS = dict(description='test set labels',
filename='t10k-labels-idx1-ubyte.gz')
LABELS_MAP = ['t_shirt_top', 'trousers', 'pullover', 'dress',
'coat', 'sandal', 'shirt', 'sneaker', 'bag', 'ankle_boot']
def download_fashion_mnist(target_dir):
if not os.path.exists(target_dir):
os.makedirs(target_dir)
for datum in (TRAIN_DATA, TRAIN_LABELS, TEST_DATA, TEST_LABELS):
archive_path = os.path.join(target_dir, datum['filename'])
print(80 * '-')
print("Downloading fashion-mnist {description} to {filename}".format(
description=datum['description'],
filename=archive_path))
opener = url_request.urlopen(URL + datum['filename'])
with open(archive_path, 'wb') as f:
f.write(chunk_read(opener))
def load_fashion(path, kind='train'):
"""Loading utility modified from
https://github.com/zalandoresearch/fashion-mnist/blob/master/utils/mnist_reader.py
"""
if kind not in ['t10k', 'train', 'test']:
raise ValueError("Unrecognized dataset partition. `kind` = {kind}, but must be "
"{'train', 'test', 't10k'}".format(kind=kind))
if kind == 'test':
kind = 't10k'
labels_path = os.path.join(path,
'{kind}-labels-idx1-ubyte.gz'.format(kind=kind))
images_path = os.path.join(path,
'{kind}-images-idx3-ubyte.gz'.format(kind=kind))
with gzip.open(labels_path, 'rb') as label_file:
struct.unpack('>II', label_file.read(8))
labels = np.frombuffer(label_file.read(), dtype=np.uint8)
n_images = len(labels)
with gzip.open(images_path, 'rb') as image_file:
struct.unpack('>IIII', image_file.read(16))
images = np.frombuffer(image_file.read(), dtype=np.uint8).reshape(n_images, 28, 28)
return images, labels
def fetch_fashion_images(kind='train'):
data_home = get_data_home()
fashion_dir = os.path.join(data_home, DATA_NAME)
if not os.path.exists(fashion_dir):
download_fashion_mnist(fashion_dir)
images, labels = load_fashion(fashion_dir, kind=kind)
return images, np.array([LABELS_MAP[label] for label in labels])
```
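Likewise, a short usage sketch for the Fashion-MNIST loader; the first call downloads roughly 30 MB of gzipped IDX files into the mosaic data home.
```python
import numpy as np
from mosaic.datasets.fashion import fetch_fashion_images

images, labels = fetch_fashion_images(kind='train')
print(images.shape, images.dtype)  # (60000, 28, 28) uint8
print(np.unique(labels)[:3])       # ['ankle_boot' 'bag' 'coat']
```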
#### File: mosaic/datasets/rothko.py
```python
import glob
import os
import itertools
import urllib.request as url_request
import tarfile
import json
import numpy as np
from sklearn.utils import check_random_state
from mosaic import image_io
from mosaic.datasets.base import get_data_home, get_bucket, ImageDataBundle
from mosaic.datasets.progress_bar import chunk_read
HERE = os.path.dirname(os.path.abspath(__file__))
URL = get_bucket("rothko_images.tar.gz")
ARCHIVE_NAME = "rothko_images.tar.gz"
DATA_NAME = "rothko_images"
def download_rothko_images(target_dir):
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
print(80 * '-')
print("Downloading rothko images to {}".format(archive_path))
opener = url_request.urlopen(URL)
with open(archive_path, 'wb') as f:
f.write(chunk_read(opener))
print(80 * '-')
print("Extracting rothko images to {}".format(target_dir))
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
def fetch_rothko_images():
data_home = get_data_home()
rothko_dir = os.path.join(data_home, DATA_NAME)
if not os.path.exists(rothko_dir):
download_rothko_images(data_home)
return rothko_dir, image_io.directory_to_dataframe(rothko_dir)
```
#### File: mosaic/features/hsv.py
```python
import numpy as np
from joblib import Parallel, delayed
from skimage import color
from mosaic import image_io
__all__ = ['HSVFeatures', 'HUE', 'SATURATION', 'VALUE', 'extract_hsv_stats']
class HSVFeatures(object):
"""Enum used within the code-base to refer to HSV features."""
#: HUE value
HUE = '__hue__'
SATURATION = '__saturation__'
VALUE = '__value__'
index_map = {
HUE: 0, SATURATION: 1, VALUE: 2
}
@classmethod
def all_features(cls):
return (cls.HUE, cls.SATURATION, cls.VALUE)
@classmethod
def validate(cls, value):
if value not in cls.all_features():
            raise ValueError('`value` = {} not a valid color feature.'.format(value))
@classmethod
def feature_index(cls, value):
return cls.index_map[value]
# Expose the enums
HUE = HSVFeatures.HUE
SATURATION = HSVFeatures.SATURATION
VALUE = HSVFeatures.VALUE
def hsv_features_single(image, agg_func=np.mean, background=None):
"""For each hsv value (hue, saturation, value) calculate
    an aggregate statistic (`agg_func`) of that value for an image.
Parameters
----------
image : np.array of shape (width, height, 3)
        The image over which the aggregate hsv statistics
are calculated.
agg_func : numpy ufunc (default=np.mean)
A function that will calculate a scalar statistic
over the given values.
background : array-like of shape [3,] (default=None)
The background color value for each hsv channel.
These values will be masked out in the calculation.
If None, then all values are included in the statistics
calculation.
Returns
-------
statistics : tuple
The statistics for each channel (h_mean, s_mean, v_mean).
"""
image = np.asarray(image, dtype=np.uint8)
if len(image.shape) < 3:
image = color.gray2rgb(image)
hsv_image = color.rgb2hsv(image)
if background is not None:
h_channel = hsv_image[:, :, 0]
h_mean = agg_func(
np.ma.array(h_channel, mask=(h_channel == background[0]))
)
h_mean = background[0] if h_mean is np.ma.masked else h_mean
s_channel = hsv_image[:, :, 1]
s_mean = agg_func(
np.ma.array(s_channel, mask=(s_channel == background[1]))
)
s_mean = background[1] if s_mean is np.ma.masked else s_mean
v_channel = hsv_image[:, :, 2]
v_mean = agg_func(
np.ma.array(v_channel, mask=(v_channel == background[2]))
)
v_mean = background[2] if v_mean is np.ma.masked else v_mean
else:
h_mean = agg_func(hsv_image[:, :, 0])
s_mean = agg_func(hsv_image[:, :, 1])
v_mean = agg_func(hsv_image[:, :, 2])
return h_mean, s_mean, v_mean
def extract_hsv_stats(image_list, mode='mean', background=None, n_jobs=1):
"""Extract aggregate statistics in the HSV domain of an RGB image.
A useful ordering tool is the HSV values of an RGB image.
In particular, arranging images by H (hue) will order them by
their color along the color spectrum. This function extracts
a scalar statistic for each HSV channel of every image in an array.
Parameters
----------
    image_list : list of length [n_samples,]
A list of PIL.Images.
mode : str {'mean', 'median'} (default='mean')
The statistic to extract for each channel.
    background : array-like of shape [3,] or str {'white', 'black'}, optional
The background color value for each hsv channel.
These values will be masked out in the calculation.
If None, then all values are included in the statistics
calculation.
n_jobs : int (default=1)
Number of jobs to run in parallel.
Returns
-------
np.array of shape [n_samples, 3]
An array containing the hsv statistics for each channel.
"""
if background == 'white':
background = np.array([0, 0, 1], dtype=np.uint8)
elif background == 'black':
background = np.array([0, 0, 0], dtype=np.uint8)
if mode == 'mean':
agg_func = np.mean
elif mode == 'median':
agg_func = np.median
else:
raise ValueError("Unkown mode `{}`.".format(mode))
result = Parallel(n_jobs=n_jobs)(
delayed(hsv_features_single)(image, agg_func, background)
for image in image_list)
return np.vstack(result)
```
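A hedged usage sketch for `extract_hsv_stats`: since `hsv_features_single` converts its input with `np.asarray`, plain uint8 arrays stand in here for the PIL images mentioned in the docstring.
```python
import numpy as np
from mosaic.features.hsv import extract_hsv_stats, HSVFeatures, HUE

red = np.zeros((8, 8, 3), dtype=np.uint8)
red[..., 0] = 255                                # solid red image
blue = np.zeros((8, 8, 3), dtype=np.uint8)
blue[..., 2] = 255                               # solid blue image

stats = extract_hsv_stats([red, blue], mode='mean')
print(stats.shape)                               # (2, 3): one (h, s, v) row per image
print(stats[:, HSVFeatures.feature_index(HUE)])  # hue ~0.00 for red, ~0.67 for blue
```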
#### File: mosaic/tests/test_io.py
```python
from mosaic import image_io
def test_load_image(rgb_image_data):
image_dir, image_list = rgb_image_data
image = image_io.load_image(image_list[0],
image_dir=image_dir,
as_image=True)
def test_load_images(rgb_image_data):
image_dir, image_list = rgb_image_data
images = image_io.load_images(image_list,
image_dir=image_dir,
as_image=True)
assert len(images) == len(image_list)
def test_load_from_directory(rgb_image_data):
image_dir, image_list = rgb_image_data
images = image_io.load_from_directory(image_dir,
as_image=True)
assert len(images) == len(image_list)
```
{
"source": "joshloyal/multidynet",
"score": 2
}
#### File: multidynet/datasets/load_icews.py
```python
import numpy as np
import pandas as pd
import joblib
from os.path import dirname, join
__all__ = ['load_icews']
def load_icews(dataset='small', country_names='full', year_range=None):
module_path = dirname(__file__)
file_path = join(module_path, 'raw_data')
dir_name = 'icews_small' if dataset == 'small' else 'icews_large'
file_name = join(file_path, dir_name, 'numpy_data', 'icews_networks.gz')
Y = joblib.load(open(file_name, 'rb'))
if country_names == 'full':
countries = np.loadtxt(
join(file_path, dir_name, 'numpy_data', 'icews_countries.txt'),
            delimiter='\n', dtype=str)
else:
countries = np.loadtxt(
join(file_path, dir_name, 'numpy_data', 'icews_countries_iso.txt'),
            delimiter='\n', dtype=str)
layer_labels = ['Verbal Cooperation', 'Material Cooperation',
'Verbal Conflict', 'Material Conflict']
time_labels = pd.date_range(
start='January 01 2009', end='December 31 2016', freq='M')
time_labels = time_labels.strftime('%b %Y')
if year_range is not None:
start = 'Jan {}'.format(year_range[0])
end = 'Jan {}'.format(year_range[1] + 1)
start_id = np.where(start == time_labels)[0][0]
end_id = np.where(end == time_labels)[0][0]
Y = Y[:, start_id:end_id, :, :]
time_labels = time_labels[start_id:end_id]
return np.ascontiguousarray(Y), countries, layer_labels, time_labels
```
#### File: multidynet/datasets/synthetic.py
```python
import numpy as np
from scipy.special import expit
from sklearn.utils import check_random_state
__all__ = ['simple_dynamic_multilayer_network', 'simple_dynamic_network',
'dynamic_multilayer_network']
def multilayer_network_from_dynamic_latent_space(X, lmbda, delta,
random_state=None):
rng = check_random_state(random_state)
    if delta is None:
        n_layers = lmbda.shape[0]
        n_time_steps, n_nodes, _ = X.shape
        delta = np.zeros((n_layers, n_time_steps, n_nodes), dtype=np.float64)
    else:
        n_layers, n_time_steps, n_nodes = delta.shape
Y = np.zeros((n_layers, n_time_steps, n_nodes, n_nodes), dtype=np.float64)
probas = np.zeros(
(n_layers, n_time_steps, n_nodes, n_nodes), dtype=np.float64)
dists = np.zeros(
(n_layers, n_time_steps, n_nodes, n_nodes), dtype=np.float64)
for k in range(n_layers):
for t in range(n_time_steps):
# sample the adjacency matrix
deltak = delta[k, t].reshape(-1, 1)
eta = np.add(deltak, deltak.T)
if X is not None:
dists[k, t] = np.dot(X[t] * lmbda[k], X[t].T)
eta += dists[k, t]
probas[k, t] = expit(eta)
            Y[k, t] = rng.binomial(1, probas[k, t]).astype(int)
# make symmetric
Y[k, t] = np.tril(Y[k, t], k=-1)
Y[k, t] += Y[k, t].T
return Y, probas, dists
def simple_dynamic_multilayer_network(n_nodes=100, n_time_steps=4,
n_features=2, tau_sq=1.0, sigma_sq=0.05,
lmbda_scale=1.0,
lmbda=None,
assortative_reference=True,
random_state=42):
rng = check_random_state(random_state)
# construct latent features
X = np.zeros((n_time_steps, n_nodes, n_features), dtype=np.float64)
X[0] = np.sqrt(tau_sq) * rng.randn(n_nodes, n_features)
for t in range(1, n_time_steps):
X[t] = X[t-1] + np.sqrt(sigma_sq) * rng.randn(n_nodes, n_features)
X -= np.mean(X, axis=(0, 1))
    # assortative and disassortative layers
if lmbda is None:
n_layers = 4
lmbda = np.zeros((n_layers, n_features))
if assortative_reference:
lmbda[0] = np.array([1., 1.])
else:
lmbda[0] = -np.array([1., 1.])
lmbda[1] = lmbda_scale * lmbda[0]
lmbda[2] = -lmbda_scale * lmbda[0]
lmbda[3] = -lmbda[0]
else:
n_layers = lmbda.shape[0]
# degree effects
delta = np.zeros((n_layers, n_time_steps, n_nodes))
for k in range(n_layers):
delta[k, 0] = rng.randn(n_nodes)
for t in range(1, n_time_steps):
delta[k, t] = delta[k, t-1] + np.sqrt(0.1) * rng.randn(n_nodes)
# construct the network
Y, probas, dists = multilayer_network_from_dynamic_latent_space(
X, lmbda, delta, random_state=rng)
return Y, X, lmbda, delta, probas, dists
def dynamic_multilayer_network(n_nodes=100, n_layers=4, n_time_steps=10,
n_features=2, tau_sq=4.0, sigma_sq=0.05,
include_delta=True,
sigma_sq_delta=0.1, random_state=42):
rng = check_random_state(random_state)
# construct latent features
n_features = n_features if n_features is not None else 0
if n_features > 0:
X = np.zeros((n_time_steps, n_nodes, n_features), dtype=np.float64)
X[0] = np.sqrt(tau_sq) * rng.randn(n_nodes, n_features)
X[0] -= np.mean(X[0], axis=0)
for t in range(1, n_time_steps):
X[t] = X[t-1] + np.sqrt(sigma_sq) * rng.randn(n_nodes, n_features)
X[t] -= np.mean(X[t], axis=0)
#X -= np.mean(X, axis=(0, 1))
# sample assortativity parameters from a U(-2, 2)
lmbda = np.zeros((n_layers, n_features))
lmbda[0] = rng.choice([-1, 1], size=n_features)
lmbda[1:] = rng.uniform(
-2, 2, (n_layers - 1) * n_features).reshape(n_layers - 1, n_features)
else:
X = None
lmbda = None
# sample degree effects from a U(-4, 4)
delta = np.zeros((n_layers, n_time_steps, n_nodes))
if include_delta:
for k in range(n_layers):
delta[k, 0] = rng.uniform(-4, 4, size=n_nodes)
for t in range(1, n_time_steps):
delta[k, t] = (
delta[k, t-1] + np.sqrt(sigma_sq_delta) * rng.randn(n_nodes))
# construct the network
Y, probas, dists = multilayer_network_from_dynamic_latent_space(
X, lmbda, delta, random_state=rng)
return Y, X, lmbda, delta, probas, dists
def network_from_dynamic_latent_space(X, delta, random_state=None):
rng = check_random_state(random_state)
n_time_steps, n_nodes, _ = X.shape
Y = np.zeros((n_time_steps, n_nodes, n_nodes), dtype=np.float64)
probas = np.zeros(
(n_time_steps, n_nodes, n_nodes), dtype=np.float64)
deltat = delta.reshape(-1, 1)
for t in range(n_time_steps):
# sample the adjacency matrix
eta = np.add(deltat, deltat.T) + np.dot(X[t], X[t].T)
probas[t] = expit(eta)
        Y[t] = rng.binomial(1, probas[t]).astype(int)
# make symmetric
Y[t] = np.tril(Y[t], k=-1)
Y[t] += Y[t].T
return Y, probas
def simple_dynamic_network(n_nodes=100, n_time_steps=4,
n_features=2, tau_sq=1.0, sigma_sq=0.05,
random_state=42):
rng = check_random_state(random_state)
# construct latent features
X = np.zeros((n_time_steps, n_nodes, n_features), dtype=np.float64)
X[0] = np.sqrt(tau_sq) * rng.randn(n_nodes, n_features)
for t in range(1, n_time_steps):
X[t] = X[t-1] + np.sqrt(sigma_sq) * rng.randn(n_nodes, n_features)
# degree effects
delta = rng.randn(n_nodes)
# construct the network
Y, probas = network_from_dynamic_latent_space(
X, delta, random_state=rng)
return Y, X, delta, probas
```
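A quick sketch of generating data with the simulators above (module path assumed from the file header):
```python
from multidynet.datasets.synthetic import simple_dynamic_multilayer_network

Y, X, lmbda, delta, probas, dists = simple_dynamic_multilayer_network(
    n_nodes=50, n_time_steps=4, random_state=0)
print(Y.shape)      # (4, 4, 50, 50): layers x time steps x nodes x nodes
print(X.shape)      # (4, 50, 2): shared latent positions over time
print(lmbda.shape)  # (4, 2): per-layer homophily coefficients
```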
#### File: multidynet/multidynet/lsm.py
```python
import warnings
import numpy as np
import scipy.sparse as sp
from joblib import Parallel, delayed
from scipy.special import expit
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import check_array, check_random_state
from sklearn.linear_model import LogisticRegression
from tqdm import tqdm
from .multidynet import initialize_node_effects_single
from .omega_lsm import update_omega
from .deltas_lsm import update_deltas
from .lds_lsm import update_latent_positions
from .variances import update_tau_sq, update_sigma_sq
__all__ = ['DynamicNetworkLSM']
class ModelParameters(object):
def __init__(self, omega, X, X_sigma, X_cross_cov,
delta, delta_sigma,
a_tau_sq, b_tau_sq, c_sigma_sq, d_sigma_sq):
self.omega_ = omega
self.X_ = X
self.X_sigma_ = X_sigma
self.X_cross_cov_ = X_cross_cov
self.delta_ = delta
self.delta_sigma_ = delta_sigma
self.a_tau_sq_ = a_tau_sq
self.b_tau_sq_ = b_tau_sq
self.c_sigma_sq_ = c_sigma_sq
self.d_sigma_sq_ = d_sigma_sq
self.converged_ = False
self.logp_ = []
def initialize_parameters(Y, n_features, delta_var_prior,
a, b, c, d, random_state):
rng = check_random_state(random_state)
n_time_steps, n_nodes, _ = Y.shape
# omega is initialized by drawing from the prior?
omega = np.zeros((n_time_steps, n_nodes, n_nodes))
    # initialize latent space randomly
X = rng.randn(n_time_steps, n_nodes, n_features)
    # initialize to marginal covariances
sigma_init = np.eye(n_features)
X_sigma = np.tile(
sigma_init[None, None], reps=(n_time_steps, n_nodes, 1, 1))
# initialize cross-covariances
cross_init = np.eye(n_features)
X_cross_cov = np.tile(
cross_init[None, None], reps=(n_time_steps - 1, n_nodes, 1, 1))
# initialize node-effects based on a logistic regression with
# no higher order structure
delta = initialize_node_effects_single(Y)
delta_sigma = delta_var_prior * np.ones(n_nodes)
# initialize based on prior information
a_tau_sq = a
b_tau_sq = b
c_sigma_sq = c
d_sigma_sq = d
return ModelParameters(
omega=omega, X=X, X_sigma=X_sigma, X_cross_cov=X_cross_cov,
delta=delta, delta_sigma=delta_sigma,
a_tau_sq=a_tau_sq, b_tau_sq=b_tau_sq, c_sigma_sq=c_sigma_sq,
d_sigma_sq=d_sigma_sq)
def optimize_elbo(Y, n_features, delta_var_prior, tau_sq, sigma_sq, a, b, c, d,
max_iter, tol, random_state, verbose=True):
    # convergence criterion: expected log-likelihood E_q[log p(Y | theta)]
loglik = -np.infty
# initialize parameters of the model
model = initialize_parameters(
Y, n_features, delta_var_prior, a, b, c, d, random_state)
for n_iter in tqdm(range(max_iter), disable=not verbose):
prev_loglik = loglik
# coordinate ascent
# omega updates
loglik = update_omega(
Y, model.omega_, model.X_, model.X_sigma_,
model.delta_, model.delta_sigma_)
# latent trajectory updates
tau_sq_prec = (
model.a_tau_sq_ / model.b_tau_sq_ if tau_sq == 'auto' else
1. / tau_sq)
sigma_sq_prec = (
model.c_sigma_sq_ / model.d_sigma_sq_ if sigma_sq == 'auto' else
1. / sigma_sq)
update_latent_positions(
Y, model.X_, model.X_sigma_, model.X_cross_cov_,
model.delta_, model.omega_, tau_sq_prec, sigma_sq_prec)
# update node random effects
update_deltas(
Y, model.X_, model.delta_, model.delta_sigma_,
model.omega_, delta_var_prior)
        # update initial variance of the latent space
if tau_sq == 'auto':
model.a_tau_sq_, model.b_tau_sq_ = update_tau_sq(
Y, model.X_, model.X_sigma_, a, b)
# update step sizes
if sigma_sq == 'auto':
model.c_sigma_sq_, model.d_sigma_sq_ = update_sigma_sq(
Y, model.X_, model.X_sigma_, model.X_cross_cov_, c, d)
model.logp_.append(loglik)
# check convergence
change = loglik - prev_loglik
if abs(change) < tol:
model.converged_ = True
model.logp_ = np.asarray(model.logp_)
break
return model
def calculate_probabilities(X, delta):
n_time_steps = X.shape[0]
n_nodes = X.shape[1]
probas = np.zeros(
(n_time_steps, n_nodes, n_nodes), dtype=np.float64)
deltas = delta.reshape(-1, 1)
for t in range(n_time_steps):
probas[t] = expit(np.add(deltas, deltas.T) + np.dot(X[t], X[t].T))
return probas
class DynamicNetworkLSM(object):
def __init__(self, n_features=2, delta_var_prior=4,
tau_sq='auto', sigma_sq='auto',
a=4.0, b=20.0, c=10, d=0.1,
n_init=1, max_iter=500, tol=1e-2,
n_jobs=-1, random_state=42):
self.n_features = n_features
self.delta_var_prior = delta_var_prior
self.tau_sq = tau_sq
self.sigma_sq = sigma_sq
self.a = a
self.b = b
self.c = c
self.d = d
self.n_init = n_init
self.max_iter = max_iter
self.tol = tol
self.n_jobs = n_jobs
self.random_state = random_state
def fit(self, Y):
"""
Parameters
----------
Y : array-like, shape (n_time_steps, n_nodes, n_nodes)
"""
Y = check_array(Y, order='C', dtype=np.float64,
ensure_2d=False, allow_nd=True, copy=False)
random_state = check_random_state(self.random_state)
# run the elbo optimization over different initializations
seeds = random_state.randint(np.iinfo(np.int32).max, size=self.n_init)
verbose = True if self.n_init == 1 else False
models = Parallel(n_jobs=self.n_jobs)(delayed(optimize_elbo)(
Y, self.n_features, self.delta_var_prior,
self.tau_sq, self.sigma_sq, self.a, self.b, self.c, self.d,
self.max_iter, self.tol, seed, verbose=verbose)
for seed in seeds)
# choose model with the largest convergence criteria
best_model = models[0]
best_criteria = models[0].logp_[-1]
        for i in range(1, len(models)):
            if models[i].logp_[-1] > best_criteria:
                best_model = models[i]
                best_criteria = models[i].logp_[-1]
if not best_model.converged_:
warnings.warn('Best model did not converge. '
'Try a different random initialization, '
'or increase max_iter, tol '
'or check for degenerate data.', ConvergenceWarning)
self._set_parameters(best_model)
# calculate dyad-probabilities
self.probas_ = calculate_probabilities(
self.X_, self.delta_)
# calculate in-sample AUC
#self.auc_ = calculate_auc_layer(Y, self.probas_)
return self
def _set_parameters(self, model):
self.omega_ = model.omega_
self.X_ = model.X_
self.X_sigma_ = model.X_sigma_
self.X_cross_cov_ = model.X_cross_cov_
self.delta_ = model.delta_
self.delta_sigma_ = model.delta_sigma_
self.a_tau_sq_ = model.a_tau_sq_
self.b_tau_sq_ = model.b_tau_sq_
self.tau_sq_ = self.b_tau_sq_ / (self.a_tau_sq_ - 1)
self.c_sigma_sq_ = model.c_sigma_sq_
self.d_sigma_sq_ = model.d_sigma_sq_
self.sigma_sq_ = self.d_sigma_sq_ / (self.c_sigma_sq_ - 1)
self.logp_ = model.logp_
return self
```
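A hedged end-to-end sketch fitting the variational LSM above to a simulated single-layer dynamic network (import paths assumed from the file headers; more iterations may be needed for convergence):
```python
from multidynet.datasets.synthetic import simple_dynamic_network
from multidynet.lsm import DynamicNetworkLSM

Y, X_true, delta_true, probas_true = simple_dynamic_network(
    n_nodes=50, n_time_steps=4, random_state=0)

model = DynamicNetworkLSM(n_features=2, n_init=1, max_iter=200, random_state=0).fit(Y)
print(model.X_.shape)       # (4, 50, 2): posterior mean latent positions
print(model.probas_.shape)  # (4, 50, 50): in-sample dyad probabilities
```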
#### File: multidynet/multidynet/metrics.py
```python
import itertools
import numpy as np
from scipy.special import logit
from sklearn.metrics import roc_auc_score
def calculate_auc_single(Y_true, Y_pred, test_indices=None):
n_time_steps, n_nodes, _ = Y_true.shape
indices = np.tril_indices_from(Y_true[0], k=-1)
y_true = []
y_pred = []
for t in range(n_time_steps):
y_true_vec = Y_true[t][indices]
y_pred_vec = Y_pred[t][indices]
if test_indices is None:
subset = y_true_vec != -1.0
else:
subset = test_indices[t]
y_true.extend(y_true_vec[subset])
y_pred.extend(y_pred_vec[subset])
return roc_auc_score(y_true, y_pred)
def calculate_auc(Y_true, Y_pred, test_indices=None):
if Y_true.ndim == 3:
return calculate_auc_single(Y_true, Y_pred, test_indices=test_indices)
n_layers, n_time_steps, n_nodes, _ = Y_true.shape
indices = np.tril_indices_from(Y_true[0, 0], k=-1)
y_true = []
y_pred = []
for k in range(n_layers):
for t in range(n_time_steps):
y_true_vec = Y_true[k, t][indices]
y_pred_vec = Y_pred[k, t][indices]
if test_indices is None:
subset = y_true_vec != -1.0
else:
subset = test_indices[k, t]
y_true.extend(y_true_vec[subset])
y_pred.extend(y_pred_vec[subset])
return roc_auc_score(y_true, y_pred)
def calculate_eta(X, lmbda, delta):
n_layers = delta.shape[0]
n_time_steps = delta.shape[1]
n_nodes = delta.shape[2]
eta = np.zeros(
(n_layers, n_time_steps, n_nodes, n_nodes), dtype=np.float64)
for k in range(n_layers):
for t in range(n_time_steps):
deltakt = delta[k, t].reshape(-1, 1)
eta_kt = np.add(deltakt, deltakt.T)
if X is not None:
eta_kt += np.dot(X[t] * lmbda[k], X[t].T)
eta[k, t] = eta_kt
return eta
def calculate_lpp(Y, model, test_indices):
n_layers, n_time_steps, _, _ = Y.shape
eta = calculate_eta(model.X_, model.lambda_, model.delta_)
lpp = 0.
indices = np.tril_indices_from(Y[0, 0], k=-1)
for k in range(n_layers):
for t in range(n_time_steps):
y_vec = Y[k, t][indices][test_indices[k, t]]
eta_vec = eta[k, t][indices][test_indices[k, t]]
lpp += (y_vec * eta_vec).sum()
            # Bernoulli log-partition: log(1 + exp(eta))
            lpp -= np.logaddexp(0, eta_vec).sum()
return lpp
def score_latent_space_t(X_true, X_pred, perm):
"""The estimated latent space is still invariant to column permutations and
sign flips. To fix these we do an exhaustive search over all permutations
and sign flips and return the value with the lowest MSE."""
n_features = X_true.shape[1]
X = X_pred[..., perm]
denom = np.sum(X_true ** 2)
# no flip
best_rel = np.sum((X_true - X) ** 2) / denom
# loops through single feature flips
for p in range(n_features):
Xp = X.copy()
Xp[..., p] = -X[..., p]
rel = np.sum((X_true - Xp) ** 2) / denom
if rel < best_rel:
best_rel = rel
# loop through all feature combinations
for k in range(2, n_features + 1):
for combo in itertools.combinations(range(n_features), k):
Xp = X.copy()
Xp[..., combo] = -X[..., combo]
rel = np.sum((X_true - Xp) ** 2) / denom
if rel < best_rel:
best_rel = rel
return best_rel
def score_latent_space_single_perml(X_true, X_pred):
"""The estimated latent space is still invariant to column permutations and
sign flips. To fix these we do an exhaustive search over all permutations
and sign flips and return the value with the lowest MSE.
NOTE: This function allows the flips and perms to be different over all
time-points
"""
n_time_steps, _, n_features = X_true.shape
best_rel = np.inf
best_perm = None
for perm in itertools.permutations(np.arange(n_features)):
rel = 0
for t in range(X_true.shape[0]):
rel += score_latent_space_t(X_true[t], X_pred[t], perm)
rel /= n_time_steps
if rel < best_rel:
best_rel = rel
best_perm = perm
return best_rel, best_perm
def score_latent_space(X_true, X_pred):
n_time_steps, _, n_features = X_true.shape
rel = 0
for t in range(X_true.shape[0]):
best_rel = np.inf
for perm in itertools.permutations(np.arange(n_features)):
rel_t = score_latent_space_t(X_true[t], X_pred[t], perm)
if rel_t < best_rel:
best_rel = rel_t
rel += best_rel
rel /= n_time_steps
return rel
def score_homophily_matrix(lambda_true, lambda_pred):
n_features = lambda_true.shape[1]
best_rel = np.inf
for perm in itertools.permutations(np.arange(n_features)):
rel = np.sum((lambda_true - lambda_pred[:, perm]) ** 2)
rel /= np.sum(lambda_true ** 2)
if rel < best_rel:
best_rel = rel
return best_rel
def score_social_trajectories(delta_true, delta_pred):
n_layers, n_time_steps, n_nodes = delta_true.shape
num, dem = 0., 0.
indices = np.tril_indices(n_nodes, k=-1)
for k in range(n_layers):
for t in range(n_time_steps):
d_hat = delta_pred[k, t].reshape(-1, 1)
D_hat = d_hat - d_hat.T
d_true = delta_true[k, t].reshape(-1, 1)
D_true = d_true - d_true.T
num += np.sum((D_true[indices] - D_hat[indices]) ** 2)
dem += np.sum(D_true[indices] ** 2)
return num / dem
```
#### File: multidynet/multidynet/plots.py
```python
import numbers
import tempfile
import imageio
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import numpy.linalg as linalg
import seaborn as sns
import pandas as pd
from matplotlib.colors import ListedColormap, to_hex
from matplotlib.patches import Ellipse, Rectangle, FancyArrowPatch
from scipy.stats import norm
from scipy.special import expit
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import check_random_state
from dynetlsm.plots import get_colors
__all__ = ['plot_network', 'make_network_animation',
'plot_sociability', 'plot_lambda', 'plot_node_trajectories',
'plot_pairwise_distances', 'plot_pairwise_probabilities']
def normal_contour(mean, cov, n_std=2, ax=None, **kwargs):
if cov.shape[0] != 2:
raise ValueError('Only for bivariate normal densities.')
eigenvalues, eigenvectors = linalg.eigh(cov)
# sort the eigenvalues and eigenvectors in descending order
order = eigenvalues.argsort()[::-1]
eigenvalues = eigenvalues[order]
eigenvectors = eigenvectors[:, order]
# determine the angle of rotation
angle = np.degrees(np.arctan2(*eigenvectors[:, 0][::-1]))
if ax is None:
ax = plt.gca()
if isinstance(n_std, numbers.Integral):
# the diameter of the ellipse is twice the square root of the evalues
width, height = 2 * n_std * np.sqrt(eigenvalues)
ellipse = Ellipse(xy=mean, width=width, height=height, angle=angle,
**kwargs)
ax.add_artist(ellipse)
return ellipse
ellipses = []
for std in n_std:
width, height = 2 * std * np.sqrt(eigenvalues)
ellipse = Ellipse(xy=mean, width=width, height=height, angle=angle,
**kwargs)
ax.add_artist(ellipse)
ellipses.append(ellipse)
return ellipses
def plot_network(Y, X, X_sigma=None, delta=None,
z=None, tau_sq=None, normalize=False, figsize=(8, 6),
node_color='orangered', color_distance=False,
colors=None, alpha=1.0, contour_alpha=0.25,
size=300, edgecolors='w',
edge_width=0.25, node_labels=None,
font_size=12, legend_fontsize=12,
with_labels=False, ax=None):
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
else:
fig = None
r = np.sqrt((X ** 2).sum(axis=1)).reshape(-1, 1)
if normalize:
X = X / r
cmap = None
if not isinstance(node_color, np.ndarray):
cmap = ListedColormap(
sns.light_palette(node_color, n_colors=np.unique(r).shape[0]))
G = nx.from_numpy_array(Y)
if node_labels is not None:
labels = {node_id : label for node_id, label in enumerate(node_labels)}
else:
labels = None
if z is None:
if not isinstance(node_color, np.ndarray):
if color_distance:
node_color = r.ravel() / r.min()
else:
node_color = np.asarray([node_color] * X.shape[0])
else:
encoder = LabelEncoder().fit(z)
if colors is None:
colors = get_colors(z.ravel())
node_color = colors[encoder.transform(z)]
# add a legend
for i in range(encoder.classes_.shape[0]):
ax.plot([0], [0], 'o', c=colors[i], label=encoder.classes_[i],
markeredgecolor='w', zorder=0)
ax.plot([0], [0], 'o', markeredgecolor='w', c='w', zorder=0)
# draw latent position credible interval ellipses
if X_sigma is not None:
for i in range(X.shape[0]):
if isinstance(contour_alpha, np.ndarray):
calpha = contour_alpha[i]
else:
calpha = contour_alpha
normal_contour(X[i], X_sigma[i], edgecolor='gray',
facecolor=node_color[i] if isinstance(node_color, np.ndarray) else 'gray',
alpha=calpha, ax=ax, n_std=[2])
nx.draw_networkx(G, X, edge_color='gray', width=edge_width,
node_color=node_color,
node_size=size if delta is None else 0,
alpha=alpha,
cmap=cmap,
labels=labels,
font_size=font_size,
with_labels=with_labels,
edgecolors=edgecolors,
ax=ax)
if delta is not None:
sizes = (delta - delta.min()) / (delta.max() - delta.min())
ax.scatter(X[:, 0], X[:, 1], s=size * sizes,
c='gray')
if X_sigma is not None:
ax.collections[0].set_edgecolor(None)
else:
ax.collections[0].set_edgecolor('white')
ax.axis('equal')
ax.axis('off')
# draw normal contour if available
if tau_sq is not None:
# draw center of latent space
ax.scatter(0, 0, color='k', marker='+', s=200)
# draw two standard deviation contour
normal_contour([0, 0], tau_sq * np.eye(X.shape[1]), n_std=[1],
linestyle='--', edgecolor='k',
facecolor='none', zorder=1, ax=ax)
if z is not None:
ax.legend(loc='lower center', bbox_to_anchor=(0.5, -0.05), ncol=6,
fontsize=legend_fontsize)
return fig, ax
def make_network_animation(filename, Y, X, X_sigma=None,
k=0, z=None, tau_sq=None, normalize=True,
figsize=(8, 6), node_color='orangered',
alpha=1.0, contour_alpha=0.25,
size=300, edge_width=0.25,
node_labels=None, font_size=12, with_labels=False,
layer_labels=None, time_labels=None,
title_fmt='{}, {}', border=0.5, duration=1):
# XXX: hack to shut off plotting within a jupyter notebook...
plt.ioff()
n_layers, n_time_steps, _, _ = Y.shape
if layer_labels is None:
layer_labels = ["k = {}".format(k) for k in range(n_layers)]
if time_labels is None:
time_labels = ["t = {}".format(t) for t in range(n_time_steps)]
with tempfile.TemporaryDirectory() as tempdir:
x_max, y_max = X.max(axis=(0, 1))
x_min, y_min = X.min(axis=(0, 1))
pngs = []
for t in range(Y.shape[1]):
fig, ax = plot_network(Y[k, t], X[t],
X_sigma=X_sigma[t] if X_sigma is not None else None,
z=z, tau_sq=tau_sq,
normalize=normalize, figsize=figsize, node_color=node_color,
alpha=alpha, contour_alpha=contour_alpha,
size=size, edge_width=edge_width,
node_labels=node_labels, font_size=font_size,
with_labels=with_labels,)
ax.set_title(title_fmt.format(layer_labels[k], time_labels[t]))
ax.set_xlim(x_min - border, x_max + border)
ax.set_ylim(y_min - border, y_max + border)
fname = tempfile.TemporaryFile(dir=tempdir, suffix='.png')
fig.savefig(fname, dpi=100)
fname.seek(0)
plt.close(fig) # necessary to free memory
pngs.append(fname)
images = []
for png in pngs:
images.append(imageio.imread(png))
imageio.mimsave(filename, images, duration=duration)
plt.ion()
def plot_static_sociability(model, k=0, node_labels=None, layer_label=None,
ax=None, figsize=(10, 12), color_code=False):
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
else:
fig = None
if node_labels is None:
node_labels = [str(i + 1) for i in range(model.X_.shape[1])]
node_labels = np.asarray(node_labels)
order = np.argsort(model.delta_[k])
odds = np.exp(model.delta_[k][order])
y_pos = np.arange(node_labels.shape[0])
if color_code:
colors = ['steelblue' if odds[i] >= 1. else 'gray' for i in
range(len(odds))]
else:
colors = 'gray'
ax.barh(y_pos, odds, align='center', color=colors)
ax.set_yticks(y_pos)
ax.set_yticklabels(node_labels[order])
    ax.set_xlabel(r'odds [$\exp(\delta_k^i)$]')
if layer_label is not None:
ax.set_title(layer_label)
else:
ax.set_title('k = {}'.format(k))
return fig, ax
def plot_social_trajectories(
model, k=0, q_alpha=0.05, node_list=None, node_colors=None,
node_labels=None, layer_label=None, ref_value=0,
ref_label=None,
plot_hline=True, xlabel='Time', alpha=0.15, fill_alpha=0.2,
line_width=3, ax=None, figsize=(10, 6), label_offset=1,
fontsize=12, color_code=False):
n_layers, n_time_steps, n_nodes = model.delta_.shape
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
else:
fig = None
ax.set_clip_on(False)
if node_labels is None:
node_labels = [str(i + 1) for i in range(model.delta_.shape[2])]
node_labels = np.asarray(node_labels)
for i in range(n_nodes):
if model.X_ is None:
ax.plot(model.delta_[k, :, i].T, 'k-', alpha=alpha)
else:
ax.plot(model.gamma_[k, :, i].T, 'k-', alpha=alpha)
if node_list is not None:
node_list = np.asarray(node_list)
if node_colors is None:
node_colors = get_colors(np.arange(len(node_list)))
for i, node_label in enumerate(node_list):
node_id = np.where(node_labels == node_label)[0].item()
if model.X_ is None:
ax.plot(model.delta_[k, :, node_id].T, '--',
lw=line_width, c=node_colors[i])
else:
ax.plot(model.gamma_[k, :, node_id].T, '--',
lw=line_width, c=node_colors[i])
ax.annotate(node_label,
xy=(n_time_steps + label_offset,
model.delta_[k, -1, node_id]),
color=node_colors[i], fontsize=fontsize,
annotation_clip=False)
if q_alpha is not None and model.X_ is None:
x_upp = np.zeros(n_time_steps)
x_low = np.zeros(n_time_steps)
z_alpha = norm.ppf(1 - q_alpha / 2.)
ts = np.arange(n_time_steps)
for t in range(n_time_steps):
se = z_alpha * np.sqrt(model.delta_sigma_[k, t, node_id])
x_upp[t] = model.delta_[k, t, node_id] + se
x_low[t] = model.delta_[k, t, node_id] - se
ax.fill_between(
ts, x_low, x_upp, alpha=fill_alpha, color=node_colors[i])
elif q_alpha is not None:
gamma_ci = np.quantile(
model.gammas_, [q_alpha/2., 1 - q_alpha/2.], axis=0)
ax.fill_between(
np.arange(n_time_steps), gamma_ci[0, k, :, node_id],
gamma_ci[1, k, :, node_id],
alpha=fill_alpha, color=node_colors[i])
if plot_hline:
ax.hlines(
ref_value, 0, n_time_steps - 1, lw=2, linestyles='--', color='k')
if ref_label:
ax.annotate(ref_label,
xy=(n_time_steps + label_offset, ref_value),
color='k', fontsize=fontsize)
# remove spines
#ax.spines['right'].set_visible(False)
#ax.spines['left'].set_visible(False)
#ax.spines['top'].set_visible(False)
#ax.spines['bottom'].set_visible(False)
# axis-labels
ax.set_ylabel('Sociality', fontsize=fontsize)
ax.set_xlabel(xlabel, fontsize=fontsize)
if layer_label is not None:
ax.set_title(layer_label, fontsize=fontsize)
else:
ax.set_title('k = {}'.format(k), fontsize=fontsize)
return fig, ax
def plot_node_trajectories(model, node_list, q_alpha=0.05, node_labels=None,
node_colors=None, nrows=None, ncols=1, alpha=0.2,
linestyle='o--', fontsize=12,
figsize=(10, 8)):
if nrows is None:
nrows = model.X_.shape[2]
fig, axes = plt.subplots(nrows, ncols, figsize=figsize)
ax = axes.flat
if node_labels is None:
node_labels = [i for i in range(model.X_.shape[1])]
node_labels = np.asarray(node_labels)
if node_colors is None:
node_colors = get_colors(np.arange(len(node_list)))
n_time_steps, n_nodes, n_features = model.Z_.shape
z_alpha = norm.ppf(1 - q_alpha / 2.)
ts = np.arange(n_time_steps)
for i, node_label in enumerate(node_list):
node_id = np.where(node_labels == node_label)[0].item()
x_upp = np.zeros(n_time_steps)
x_low = np.zeros(n_time_steps)
for p in range(n_features):
ax[p].plot(ts, model.Z_[:, node_id, p], linestyle,
label=node_labels[node_id], c=node_colors[i])
for t in range(n_time_steps):
se = z_alpha * np.sqrt(model.Z_sigma_[t, node_id, p, p])
x_upp[t] = model.Z_[t, node_id, p] + se
x_low[t] = model.Z_[t, node_id, p] - se
ax[p].fill_between(
ts, x_low, x_upp, alpha=alpha, color=node_colors[i])
    # accommodate legends and title
ax[0].legend(bbox_to_anchor=(1.04, 1), loc='upper left', fontsize=fontsize)
ax[-1].set_xlabel('t')
for p in range(n_features):
#ax[p].set_title('p = {}'.format(p + 1), fontsize=fontsize)
ax[p].hlines(0, 1, n_time_steps, lw=2, linestyles='dotted', color='k', alpha=0)
ax[p].set_ylabel('Latent Position [h = {}]'.format(p + 1),
fontsize=fontsize)
ax[p].tick_params(axis='x', labelsize=fontsize)
ax[p].tick_params(axis='y', labelsize=fontsize)
plt.subplots_adjust(right=0.7)
return fig, ax
def sample_distances(model, k, t, i, j, n_reps=1000, random_state=123):
rng = check_random_state(random_state)
Xi = rng.multivariate_normal(model.X_[t, i], model.X_sigma_[t, i],
size=n_reps)
Xj = rng.multivariate_normal(model.X_[t, j], model.X_sigma_[t, j],
size=n_reps)
if k == 0:
lmbdak = np.zeros((n_reps, model.lambda_.shape[1]))
for p in range(model.lambda_.shape[1]):
lmbdak[:, p] = (
2 * rng.binomial(1, model.lambda_proba_[p], size=n_reps) - 1)
else:
lmbdak = rng.multivariate_normal(
model.lambda_[k], model.lambda_sigma_[k], size=n_reps)
return np.sum(lmbdak * Xi * Xj, axis=1)
def plot_pairwise_distances(model, node_i, node_j,
node_labels=None,
layer_labels=None, q_alpha=0.05, n_reps=1000,
random_state=123, alpha=0.2, linestyle='--',
figsize=(10, 8), ax=None):
if ax is not None:
fig = None
else:
fig, ax = plt.subplots(figsize=figsize)
if node_labels is None:
node_labels = [i for i in range(model.X_.shape[1])]
node_labels = np.asarray(node_labels)
n_layers, n_time_steps, n_nodes, _ = model.dist_.shape
ts = np.arange(n_time_steps)
i = np.where(node_labels == node_i)[0].item()
j = np.where(node_labels == node_j)[0].item()
for k in range(n_layers):
if layer_labels is None:
label = 'k = {}'.format(k)
else:
label = layer_labels[k]
if q_alpha is None:
ax.plot(ts, model.dist_[k, :, i, j], linestyle,
label=label)
else:
dist_mean = np.zeros(n_time_steps)
dist_low = np.zeros(n_time_steps)
dist_upp = np.zeros(n_time_steps)
for t in range(n_time_steps):
dist = sample_distances(
model, k, t, i, j, n_reps=n_reps, random_state=random_state)
dist_mean[t] = dist.mean()
dist_low[t] = np.quantile(dist, q=q_alpha / 2.)
dist_upp[t] = np.quantile(dist, q=1 - q_alpha / 2.)
ax.plot(ts, dist_mean, linestyle, label=label)
ax.fill_between(ts, dist_low, dist_upp, alpha=alpha)
break
    # accommodate legends and title
ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left')
ax.set_xlabel('t')
ax.set_ylabel('Distances ({} - {})'.format(node_i, node_j))
return fig, ax
def sample_link_probability(model, k, t, i, j, n_reps=1000, random_state=123):
rng = check_random_state(random_state)
deltai = rng.normal(
loc=model.delta_[k, t, i], scale=np.sqrt(model.delta_sigma_[k, t, i]),
size=n_reps)
deltaj = rng.normal(
loc=model.delta_[k, t, j], scale=np.sqrt(model.delta_sigma_[k, t, j]),
size=n_reps)
Xi = rng.multivariate_normal(model.X_[t, i], model.X_sigma_[t, i],
size=n_reps)
Xj = rng.multivariate_normal(model.X_[t, j], model.X_sigma_[t, j],
size=n_reps)
if k == 0:
lmbdak = np.zeros((n_reps, model.lambda_.shape[1]))
for p in range(model.lambda_.shape[1]):
lmbdak[:, p] = (
2 * rng.binomial(1, model.lambda_proba_[p], size=n_reps) - 1)
else:
lmbdak = rng.multivariate_normal(
model.lambda_[k], model.lambda_sigma_[k], size=n_reps)
return expit(deltai + deltaj + np.sum(lmbdak * Xi * Xj, axis=1))
def forecast_link_probability(model, k, i, j, horizon=1, n_reps=1000,
random_state=123):
rng = check_random_state(random_state)
n_features = model.X_.shape[-1]
if k == 0:
lmbdak = np.zeros((n_reps, model.lambda_.shape[1]))
for p in range(model.lambda_.shape[1]):
lmbdak[:, p] = (
2 * rng.binomial(1, model.lambda_proba_[p], size=n_reps) - 1)
else:
lmbdak = rng.multivariate_normal(
model.lambda_[k], model.lambda_sigma_[k], size=n_reps)
deltai = rng.normal(
loc=model.delta_[k, -1, i], scale=np.sqrt(model.delta_sigma_[k, -1, i]),
size=n_reps)
deltaj = rng.normal(
loc=model.delta_[k, -1, j], scale=np.sqrt(model.delta_sigma_[k, -1, j]),
size=n_reps)
Xi = rng.multivariate_normal(model.X_[-1, i], model.X_sigma_[-1, i],
size=n_reps)
Xj = rng.multivariate_normal(model.X_[-1, j], model.X_sigma_[-1, j],
size=n_reps)
pis = np.zeros((horizon, n_reps))
for h in range(horizon):
deltai = deltai + rng.normal(
loc=0, scale=np.sqrt(model.sigma_sq_delta_), size=n_reps)
deltaj = deltaj + rng.normal(
loc=0, scale=np.sqrt(model.sigma_sq_delta_), size=n_reps)
Xi = Xi + rng.multivariate_normal(
np.zeros(n_features), model.sigma_sq_ * np.eye(n_features),
size=n_reps)
Xj = Xj + rng.multivariate_normal(
np.zeros(n_features), model.sigma_sq_ * np.eye(n_features),
size=n_reps)
pis[h] = expit(deltai + deltaj + np.sum(lmbdak * Xi * Xj, axis=1))
return pis
def plot_pairwise_probabilities(model, node_i, node_j, horizon=0,
node_labels=None,
layer_labels=None, q_alpha=0.05, n_reps=1000,
random_state=123, alpha=0.2, linestyle='--',
fontsize=16, figsize=(10, 8), ax=None):
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
else:
fig = None
if node_labels is None:
node_labels = [i for i in range(model.X_.shape[1])]
node_labels = np.asarray(node_labels)
n_layers, n_time_steps, n_nodes, _ = model.probas_.shape
ts = np.arange(n_time_steps + horizon)
i = np.where(node_labels == node_i)[0].item()
j = np.where(node_labels == node_j)[0].item()
for k in range(n_layers):
if layer_labels is None:
label = 'k = {}'.format(k)
else:
label = layer_labels[k]
if q_alpha is None:
ax.plot(ts, model.probas_[k, :, i, j], linestyle,
label=label)
else:
pi_mean = np.zeros(n_time_steps + horizon)
pi_low = np.zeros(n_time_steps + horizon)
pi_upp = np.zeros(n_time_steps + horizon)
for t in range(n_time_steps):
pis = sample_link_probability(
model, k, t, i, j, n_reps=n_reps, random_state=random_state)
pi_mean[t] = pis.mean()
pi_low[t] = np.quantile(pis, q=q_alpha / 2.)
pi_upp[t] = np.quantile(pis, q=1 - q_alpha / 2.)
if horizon > 0:
pis = forecast_link_probability(
model, k, i, j, horizon=horizon, n_reps=n_reps,
random_state=random_state)
for h in range(horizon):
pi_mean[n_time_steps + h] = pis[h].mean()
pi_low[n_time_steps + h] = (
np.quantile(pis[h], q=q_alpha / 2.))
pi_upp[n_time_steps + h] = (
np.quantile(pis[h], q=1 - q_alpha / 2.))
ax.plot(ts, pi_mean, linestyle, label=label)
ax.fill_between(ts, pi_low, pi_upp, alpha=alpha)
    # accommodate legends and title
ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left', fontsize=fontsize)
ax.set_xlabel('t')
ax.set_ylabel('Link Probability ({} - {})'.format(node_i, node_j), fontsize=fontsize)
return fig, ax
def plot_homophily_matrix(model, q_alpha=0.05,
layer_labels=None, height=0.5, hspace=1.,
fontsize=12, figsize=(12, 6)):
n_layers, n_features = model.lambda_.shape
if layer_labels is None:
layer_labels = ['k = {}'.format(k + 1) for k in range(n_layers)]
fig, axes = plt.subplots(n_layers, 1, figsize=figsize, sharex=True)
z_alpha = norm.ppf(1 - q_alpha / 2.)
for p in range(n_features):
xerr = z_alpha * np.sqrt(model.lambda_sigma_[:, p, p])
for k in range(n_layers):
colors = 'red' if model.lambda_[k, p] > 0 else 'blue'
axes[k].hlines(k + hspace * p, 0, model.lambda_[k, p], lw=1,
color=colors, linestyles='--')
axes[k].errorbar(model.lambda_[k, p], k + hspace * p,
fmt='o',
xerr=xerr[k], ecolor='k', capsize=5,
color='k', markersize=9, markeredgecolor='w')
# add text
for k in range(n_layers):
align = 'right' if model.lambda_[k, p] >= 0 else 'left'
lmbda = model.lambda_[k, p]
if k == 0:
txt = '{} (d = {})'.format(lmbda, p+1)
else:
txt = '{:.3f} ({:.3f}, {:.3f})'.format(
lmbda, lmbda - xerr[k], lmbda + xerr[k])
axes[k].text(lmbda, k + hspace * p - 0.1,
txt, horizontalalignment=align)
for k in range(n_layers):
axes[k].set_yticks([k + hspace / n_features])
axes[k].set_yticklabels([layer_labels[k]], fontsize=fontsize)
axes[k].invert_yaxis()
if k != (n_layers - 1):
axes[k].spines['bottom'].set_visible(False)
axes[k].tick_params(bottom=False)
axes[-1].set_xlabel('Homophily Parameter ($\lambda_{kd}$)',
fontsize=fontsize)
x_max = max([ax.get_xlim()[1] for ax in axes.flat])
for k in range(n_layers):
if np.all(model.lambda_ >= 0):
axes[k].set_xlim(0, axes[k].get_xlim()[1])
else:
axes[k].vlines(0, k, k + hspace * (n_features - 1),
linestyles='dotted', color='k')
sns.despine(ax=axes[k], bottom=True)
sns.set_style('white')
plt.subplots_adjust(hspace=0.5)
return fig, axes
#def plot_homophily_matrix(model, q_alpha=0.05, colors=None,
# layer_labels=None, height=0.5,
# fontsize=12, figsize=(12, 6)):
# n_layers, n_features = model.lambda_.shape
#
# if layer_labels is None:
# layer_labels = ['k = {}'.format(k + 1) for k in range(n_layers)]
#
# fig, ax = plt.subplots(figsize=figsize)
#
# if colors is None:
# colors = get_colors(np.arange(n_layers))
#
# z_alpha = norm.ppf(1 - q_alpha / 2.)
# for p in range(n_features):
# xerr = z_alpha * np.sqrt(model.lambda_sigma_[:, p, p])
#
# #colors = ['red' if model.lambda_[k, p] > 0 else 'blue' for
# # k in range(n_layers)]
# ax.hlines(np.arange(n_layers) + 0.5 * p, 0, model.lambda_[:, p], lw=1,
# color=colors, linestyles='--')
# ax.errorbar(model.lambda_[:, p], np.arange(n_layers) + 0.5 * p,
# fmt='o',
# xerr=xerr, ecolor='k', capsize=5,
# color='k', markersize=9, markeredgecolor='w')
#
# # add text
# for k in range(n_layers):
# align = 'right' if model.lambda_[k, p] >= 0 else 'left'
#
# lmbda = model.lambda_[k, p]
# if k == 0:
# txt = '{}'.format(lmbda)
# else:
# txt = '{:.3f} ({:.3f}, {:.3f})'.format(
# lmbda, lmbda - xerr[k], lmbda + xerr[k])
# ax.text(lmbda, k + 0.5 * p - 0.1, txt, horizontalalignment=align)
#
# ax.set_yticks(np.arange(n_layers) + 0.25)
# ax.set_yticklabels(layer_labels, fontsize=fontsize)
# ax.invert_yaxis()
# #ax.set_title('p = {}'.format(p + 1), fontsize=fontsize)
#
# ax.set_xlabel('Homophily Parameter ($\lambda_{kp}$)',
# fontsize=fontsize)
#
# #x_max = max([ax.get_xlim()[1] for ax in axes.flat])
# if np.all(model.lambda_ >= 0):
# ax.set_xlim(0, ax.get_xlim()[1])
# else:
# ax.vlines(0, 0, n_layers - 0.5 * (n_features - 1), linestyles='dotted', color='k')
# sns.despine(ax=ax, bottom=True)
#
# sns.set_style('white')
#
# return fig, ax
def plot_lambda(model, q_alpha=0.05, layer_labels=None, height=0.5,
fontsize=12,
figsize=(12, 6), include_gridlines=False):
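    # Same point-range display as plot_homophily_matrix, but arranged with one
    # subplot per covariate dimension and the layers stacked inside each one.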
n_layers, n_features = model.lambda_.shape
if layer_labels is None:
layer_labels = ['k = {}'.format(k + 1) for k in range(n_layers)]
if include_gridlines:
sns.set_style('whitegrid')
fig, axes = plt.subplots(n_features, 1, figsize=figsize, sharex=True)
colors = [to_hex(c) for c in sns.color_palette(
'muted', n_colors=n_layers, desat=0.75)]
z_alpha = norm.ppf(1 - q_alpha / 2.)
for p, ax in enumerate(axes.flat):
xerr = z_alpha * np.sqrt(model.lambda_sigma_[:, p, p])
colors = ['red' if model.lambda_[k, p] > 0 else 'blue' for
k in range(n_layers)]
ax.hlines(np.arange(n_layers), 0, model.lambda_[:, p], lw=1,
color=colors, linestyles='--')
ax.errorbar(model.lambda_[:, p], np.arange(n_layers), fmt='o',
xerr=xerr, ecolor='k', capsize=5,
color='k', markersize=9, markeredgecolor='w')
# add text
for k in range(n_layers):
align = 'right' if model.lambda_[k, p] >= 0 else 'left'
lmbda = model.lambda_[k, p]
if k == 0:
txt = '{}'.format(lmbda)
else:
txt = '{:.3f} ({:.3f}, {:.3f})'.format(
lmbda, lmbda - xerr[k], lmbda + xerr[k])
ax.text(lmbda, k - 0.1, txt, horizontalalignment=align)
ax.set_yticks(np.arange(n_layers))
ax.set_yticklabels(layer_labels, fontsize=fontsize)
ax.invert_yaxis()
ax.set_title('h = {}'.format(p + 1), fontsize=fontsize)
axes.flat[-1].set_xlabel('Homophily Parameter ($\lambda_{kh}$)',
fontsize=fontsize)
x_max = max([ax.get_xlim()[1] for ax in axes.flat])
for ax in axes.flat:
if np.all(model.lambda_ >= 0):
ax.set_xlim(0, x_max)
else:
ax.vlines(0, 0, n_layers - 1, linestyles='dotted', color='k')
sns.despine(ax=ax, bottom=True)
sns.set_style('white')
return fig, axes
def plot_network_statistics(stat_sim, stat_obs=None, nrow=1, ncol=None,
time_labels=None, stat_label='Statistic',
time_step=1,
layer_labels=None, figsize=(16, 10),
xlabel='Time'):
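    # Box plots of simulated network statistics (stat_sim has shape
    # [n_layers, n_time_steps, n_reps]) for each layer, with the observed
    # statistics (stat_obs) overlaid as a dashed black line when provided.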
n_layers, n_time_steps, _ = stat_sim.shape
if ncol is None:
ncol = n_layers
fig, axes = plt.subplots(nrow, ncol, sharey=True, figsize=figsize)
if time_labels is None:
time_labels = np.arange(n_time_steps) + 1
if layer_labels is None:
layer_labels = np.arange(n_layers) + 1
for k, ax in enumerate(axes.flat):
data = pd.DataFrame()
for t in range(n_time_steps):
data[time_labels[t]] = stat_sim[k, t]
if stat_obs is not None:
ax.plot(np.arange(n_time_steps), stat_obs[k], 'o--', c='k')
sns.boxplot(x='variable', y='value', data=pd.melt(data),
ax=ax, color='white')
ax.set_xticklabels(time_labels[::time_step], rotation=45, fontsize=12)
plt.setp(ax.artists, edgecolor='black')
plt.setp(ax.lines, color='black')
ax.set_xticks([i for i in range(0, n_time_steps, time_step)])
ax.tick_params(axis='y', labelsize=16)
ax.tick_params(axis='x', labelsize=16)
ax.grid(axis='x')
ax.set_title(layer_labels[k], fontsize=24)
ax.set_xlabel(xlabel, fontsize=24)
if k == 0:
ax.set_ylabel(stat_label, fontsize=24)
else:
ax.set_ylabel('')
return fig, axes
``` |
{
"source": "joshloyal/Nettie",
"score": 2
} |
#### File: Nettie/backend/mxnet_backend.py
```python
import six
import numbers
import logging
import numpy as np
from sklearn.metrics import log_loss
from sklearn.utils import check_consistent_length, check_array
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import LabelBinarizer
import mxnet as mx
from mxnet.metric import EvalMetric
__all__ = ['Activation', 'Dense', 'SoftmaxOutput', 'Variable',
'BatchNormalization', 'Dropout', 'Sequential', 'Adam']
def _weighted_sum(sample_score, sample_weight, normalize=False):
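    # Plain sum, weighted sum, or (if normalize) weighted average of the
    # per-sample scores; modelled on the helper scikit-learn uses internally
    # in its metrics module.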
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
class LogLoss(object):
def __init__(self):
self.lb_ = None
@property
def __name__(self):
return 'log_loss'
def __call__(self, y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
if self.lb_ is None:
self.lb_ = LabelBinarizer()
T = self.lb_.fit_transform(y_true)
else:
T = self.lb_.transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
Y = np.clip(y_pred, eps, 1 - eps)
if not isinstance(Y, np.ndarray):
raise ValueError('y_pred should be an array of floats.')
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError('y_true and y_pred have different number of classes '
'%d, %d' % (T.shape[1], Y.shape[1]))
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
LOSS_MAP = {'categorical_crossentropy': mx.metric.np(LogLoss())}
class MXNetSymbol(object):
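    # Lazily-constructed mxnet symbol: positional/keyword arguments are stored
    # at instantiation and only applied when the wrapper is called with the
    # previous symbol, which lets layers be chained Keras-style.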
def __init__(self, *args, **kwargs):
self.logger = logging.getLogger(__name__)
self.args = args
self.kwargs = kwargs
@property
def symbol(self):
pass
def __call__(self, prev_symbol=None):
if prev_symbol:
return self.symbol(prev_symbol, *self.args, **self.kwargs)
return self.symbol(*self.args, **self.kwargs)
class Activation(MXNetSymbol):
@property
def symbol(self):
return mx.symbol.Activation
def __call__(self, prev_symbol=None):
""" Overwrite to allow for passing of the name of the activation
directly. In addition, alllow for detection of output layers,
i.e. SoftmaxOutput
"""
assert len(self.args) == 1 # this should be the name of the activaiton
# pop off the activation
activation = self.args[0]
self.args = self.args[1:]
if not isinstance(activation, six.string_types):
raise ValueError('activation type must be a string')
if activation == 'softmax':
self.logger.debug('Detected SoftmaxOutput in activation.')
return mx.symbol.SoftmaxOutput(
prev_symbol, name='softmax', *self.args, **self.kwargs)
elif prev_symbol:
return self.symbol(
prev_symbol, *self.args, act_type=activation, **self.kwargs)
return self.symbol(*self.args, act_type=activation, **self.kwargs)
class LeakyReLU(MXNetSymbol):
@property
def symbol(self):
return mx.symbol.LeakyReLU
@property
def act_type(self):
pass
def __call__(self, prev_symbol=None):
if prev_symbol:
return self.symbol(
prev_symbol, *self.args, act_type=self.act_type, **self.kwargs)
return self.symbol(*self.args, **self.kwargs)
class PReLU(LeakyReLU):
@property
def act_type(self):
return 'prelu'
class Dense(MXNetSymbol):
""" We are going to use the Keras naming convention. We need a base
layer class eventually.
"""
@property
def symbol(self):
return mx.symbol.FullyConnected
def __call__(self, prev_symbol=None):
""" Overwrite to allow for passing num_hidden directly. """
assert len(self.args) == 1 # this should be the number of hidden units
        # pop off the number of hidden units
num_hidden = self.args[0]
self.args = self.args[1:]
if not isinstance(num_hidden, numbers.Integral) or num_hidden < 0:
raise ValueError('number of hidden units must be a '
'positive integer.')
if prev_symbol:
# HACK: input_shape is used in keras and not mxnet. Lets pop
# it off for now and figure out a better inference later.
if 'input_shape' in self.kwargs:
del self.kwargs['input_shape']
return self.symbol(
prev_symbol, *self.args, num_hidden=num_hidden, **self.kwargs)
return self.symbol(*self.args, num_hidden=num_hidden, **self.kwargs)
class SoftmaxOutput(MXNetSymbol):
@property
def symbol(self):
return mx.symbol.SoftmaxOutput
class Variable(MXNetSymbol):
@property
def symbol(self):
return mx.symbol.Variable
class BatchNormalization(MXNetSymbol):
@property
def symbol(self):
return mx.symbol.BatchNorm
class Dropout(MXNetSymbol):
@property
def symbol(self):
return mx.symbol.Dropout
class Sequential(object):
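    # Minimal Keras-like container built on mx.model.FeedForward: the graph
    # starts from a 'data' Variable and layers are chained with add().
    # Typical usage (sketch; layer sizes are illustrative):
    #   net = Sequential()
    #   net.add(Dense(128, input_shape=(784,)))
    #   net.add(Activation('relu'))
    #   net.add(Dense(10))
    #   net.add(Activation('softmax'))
    #   net.compile(optimizer=Adam(), loss='categorical_crossentropy')
    #   net.fit(X, y, nb_epoch=10)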
def __init__(self):
self.logger = logging.getLogger(__name__)
self.prev_symbol = Variable('data')()
@property
def model(self):
return self._model
@model.setter
def model(self, value):
self._model = value
def compile(self, optimizer, loss, class_mode='categorical'):
""" for mxnet this is not necessary, but we have it here for
convenience.
"""
try:
self.loss = LOSS_MAP[loss]
except KeyError:
self.logger.debug('Loss function not found.')
self.loss = 'acc'
self.optimizer = optimizer
def visualize(self):
return mx.viz.plot_network(self.prev_symbol)
def add(self, symbol):
self.prev_symbol = symbol(self.prev_symbol)
def fit(self, X, y, nb_epoch=10, learning_rate=0.01, batch_size=128, validation_split=0.15):
self.model = mx.model.FeedForward(self.prev_symbol,
num_epoch=nb_epoch,
optimizer=self.optimizer,
numpy_batch_size=batch_size,
learning_rate=learning_rate)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=validation_split)
self.model.fit(X_train,
y_train,
eval_metric=self.loss,
eval_data=[X_test, y_test])
def predict(self, X):
return self.model.predict(X)
# direct imports
Adam = mx.optimizer.Adam
``` |
{
"source": "joshloyal/pydata-amazon-products",
"score": 3
} |
#### File: pydata-amazon-products/amazon_products/bokeh_plots.py
```python
import os
import bokeh.plotting
import bokeh.models
import pandas as pd
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
def colors_from_column(hue_column):
"""Apply a color palette based on the values in a column.
Parameters
----------
hue_column : pandas.Series
The column to map to a set of hex colors.
Returns
-------
    A list containing the hex color values of each point.
"""
    encoder = LabelEncoder()
    labels = pd.Series(encoder.fit_transform(hue_column.values))
    palette = sns.color_palette().as_hex()
    return labels.apply(lambda x: palette[x]).tolist()
def select_filter_table(fig, column_name, source=None, name=None, **kwargs):
"""Adds a HTML table that is filtered based on a box select tool.
Parameters
----------
fig : bokeh.plotting.Figure
Figure on which to plot
column_name : str
Name of the column in the figure's ColumnDataSource
source : bokeh.models.ColumnDataSource
The column data source of 'fig'
name : str
Bokeh series name to give to the selected data.
**kwargs
Any further arguments to be passed to fig.scatter
Returns
-------
bokeh.plotting.Figure
        Figure containing the row concatenated `fig` and table.
"""
# add an html table
if source is None:
source = fig.select(dict(type=bokeh.models.ColumnDataSource))[0]
table_source = bokeh.models.ColumnDataSource(data=source.to_df())
    # Check if the figure has a box selector. If not, add one.
box_selector = fig.select(dict(type=bokeh.models.BoxSelectTool))
if not box_selector:
name = 'main' if name is None else name
box_selector = bokeh.models.BoxSelectTool(name=name)
fig.add_tools(box_selector)
columns = [
bokeh.models.widgets.TableColumn(field=column_name, title=column_name)
]
data_table = bokeh.models.widgets.DataTable(
source=table_source, columns=columns, **kwargs)
selector_filename = 'filter_select.js'
current_dir = os.path.dirname(__file__)
with open(os.path.join(current_dir, selector_filename)) as cbk_js:
callback_code = cbk_js.read() % column_name
generic_callback = bokeh.models.CustomJS(
args=dict(source=table_source, target_obj=data_table),
code=callback_code
)
source.callback = generic_callback
return bokeh.layouts.row([fig, data_table])
def hover_tooltip(fig, source, cols=None, name=None):
"""Add a hover tooltip that displays the value in `cols` of the
selected point.
Parameters
----------
fig : bokeh.plotting.Figure
Figure on which to plot
source : bokeh.models.ColumnDataSource
The column data source of 'fig'
cols : list (default=None)
Name of the columns displayed in the hover tool.
name : str
Bokeh series name to give to the selected data.
Returns
-------
bokeh.plotting.Figure
Figure with the hover tool added.
"""
# Create the hover tool, and make sure it is only active with
# the series we plotted in with name.
name = 'main' if name is None else name
hover = bokeh.models.HoverTool(names=[name])
if cols is None:
# Display *all* columns in the tooltips
hover.tooltips = [(c, '@' + c) for c in source.column_names]
else:
# Display just the given columns in the tooltips
hover.tooltips = [(c, '@' + c) for c in cols]
hover.tooltips.append(('index', '$index'))
# Finally add/enable the tool
fig.add_tools(hover)
return fig
def scatter_plot(x, y, data, hue=None,
table_column=None, hover_columns=None,
title=None, fig=None, name=None, marker='circle',
fig_width=500, fig_height=500,
hide_axes=True, hide_grid=True, **kwargs):
"""Plots an interactive scatter plot of `x` vs `y` using bokeh. Contains
an additional table that will be filtered based on the selected data.
Parameters
----------
x : str
Name of the column to use for the x-axis values
y : str
Name of the column to use for the y-axis values
data : pandas.DataFrame
DataFrame containing the data to be plotted.
hue : str
Name of the column to use to color code the scatter plot.
table_column : str (default=None)
The column to use to create the filterable table. If None then
no table is displayed.
fig : bokeh.plotting.Figure, optional
Figure on which to plot (if not given then a new figure will be created)
name : str
Bokeh series name to give to the scattered data
marker : str
Name of marker to use for scatter plot
**kwargs
Any further arguments to be passed to fig.scatter
Returns
-------
bokeh.plotting.Figure
Figure (the same as given, or the newly created figure)
"""
data = data.copy()
# If we haven't been given a Figure obj then create it with default
# size etc.
if fig is None:
tools = 'box_zoom,reset,help'
fig = bokeh.plotting.figure(
width=fig_width, height=fig_height, tools=tools,
title=title)
if hide_axes:
fig.xaxis.visible = False
fig.yaxis.visible = False
if hide_grid:
fig.xgrid.grid_line_color = None
fig.ygrid.grid_line_color = None
# add hue if necessary
if hue:
if hue not in data.columns:
raise ValueError('Column `{}` specified for `hue` '
'not in dataframe. '.format(hue))
data['hue'] = colors_from_column(data[hue])
kwargs['color'] = 'hue'
# We're getting data from the given dataframe
source = bokeh.models.ColumnDataSource(data=data)
# We need a name so that we can restrict hover tools to just this
# particular 'series' on the plot. You can specify it (in case it
# needs to be something specific for other reasons), otherwise
# we just use 'main'
if name is None:
name = 'main'
# Actually do the scatter plot
# (other keyword arguments will be passed to this function)
fig.scatter(x, y, source=source, name=name, marker=marker, **kwargs)
if hover_columns is not None:
fig = hover_tooltip(fig, source=source, cols=hover_columns)
if table_column is not None:
fig = select_filter_table(fig, table_column, source=source)
return fig
``` |
{
"source": "joshloyal/reduce-learn",
"score": 2
} |
#### File: sliced/test/test_save.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import pytest
from scipy import sparse
from scipy import linalg
from sklearn.datasets import load_digits
from sliced import SlicedAverageVarianceEstimation
from sliced import datasets
def test_cubic():
X, y = datasets.make_cubic(random_state=123)
save = SlicedAverageVarianceEstimation().fit(X, y)
true_beta = (1 / np.sqrt(2)) * np.hstack((np.ones(2), np.zeros(8)))
angle = np.dot(true_beta, save.directions_[0, :])
np.testing.assert_allclose(np.abs(angle), 1, rtol=1e-1)
def test_regression():
"""NOTE: subsequent calls may flip the direction of eigenvectors
(mulitply by -1), so we can only compare absolute values.
This was not a problem for svds.. investigate if we can get
deterministic behavior back.
"""
X, y = datasets.make_quadratic(random_state=123)
for n_dir in range(1, X.shape[1]):
save = SlicedAverageVarianceEstimation(n_directions=n_dir)
        # check that the transformed shape is correct
X_save = save.fit(X, y).transform(X)
np.testing.assert_equal(X_save.shape[1], n_dir)
# should match fit_transform
X_save2 = save.fit_transform(X, y)
np.testing.assert_allclose(np.abs(X_save), np.abs(X_save2))
# call transform again and check if things are okay
X_save = save.transform(X)
X_save2 = save.fit_transform(X, y)
np.testing.assert_allclose(np.abs(X_save), np.abs(X_save2))
    # there is one true angle it should find
true_beta = (1 / np.sqrt(2)) * np.hstack((np.ones(2), np.zeros(8)))
angle = np.dot(true_beta, save.directions_[0, :])
np.testing.assert_allclose(np.abs(angle), 1, rtol=1e-1)
def test_n_directions_none():
X, y = datasets.make_cubic(random_state=123)
sir = SlicedAverageVarianceEstimation(n_directions=None).fit(X, y)
np.testing.assert_equal(sir.n_directions_, X.shape[1])
def test_n_slices_too_big():
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]],
dtype=np.float64)
y = np.array([1, 1, 1, 0, 0, 0])
save = SlicedAverageVarianceEstimation(n_directions=1, n_slices=10)
save.fit(X, y)
assert save.n_slices_ == 2
def test_single_y_value():
rng = np.random.RandomState(123)
X = rng.randn(100, 4)
y = np.ones(100)
with pytest.raises(ValueError):
SlicedAverageVarianceEstimation().fit(X, y)
def test_sparse_not_supported():
X, y = datasets.make_cubic(random_state=123)
X = sparse.csr_matrix(X)
with pytest.raises(TypeError):
SlicedAverageVarianceEstimation().fit(X, y)
def test_n_directions_auto_heuristic():
X, y = datasets.make_exponential(random_state=123)
save = SlicedAverageVarianceEstimation(n_directions='auto').fit(X, y)
assert save.n_directions_ == 2
X_save = save.transform(X)
assert X_save.shape == (500, 2)
def test_zero_variance_features():
"""Raise an informative error message when features of zero variance."""
X, y = load_digits(return_X_y=True)
with pytest.raises(linalg.LinAlgError):
save = SlicedAverageVarianceEstimation(n_directions='auto').fit(X, y)
@pytest.mark.skipif(sys.platform == 'win32',
reason=("Lapack's eigh is not deterministic across ",
"platforms. The sign of some eigenvectors is ",
"flipped on win32."))
def test_matches_swiss_banknote():
"""Test that the results match the R dr package on a few common datasets.
"""
X, y = datasets.load_banknote()
save = SlicedAverageVarianceEstimation(n_directions=4).fit(X, y)
np.testing.assert_allclose(
save.eigenvalues_,
np.array([0.87239404, 0.42288351, 0.12792117, 0.03771284])
)
expected_directions = np.array(
[[0.03082069, 0.20309393, -0.25314643, -0.58931337, -0.56801632,
0.47306135],
[-0.2841728, -0.05472057, -0.15731808, 0.50606843, 0.33404888,
0.72374622],
[0.09905744, -0.88896348, 0.42252244, -0.00162151, -0.09222179,
-0.11357311],
[0.75251819, -0.26448055, 0.59669025, 0.03982343, -0.018666,
0.07611073]],
)
np.testing.assert_allclose(
save.directions_, expected_directions, atol=1e-8)
``` |
{
"source": "joshloyal/resume-template",
"score": 3
} |
#### File: resume-template/scripts/run_server.py
```python
import threading
import webbrowser
from http.server import HTTPServer, SimpleHTTPRequestHandler
def run_server(server_class=HTTPServer, handler_class=SimpleHTTPRequestHandler):
server_address = ('', 8000)
httpd = server_class(server_address, handler_class)
httpd.serve_forever()
def run_browser(browser=''):
browser = webbrowser.get(browser if browser else None)
browser.open('http://localhost:8000/', new=2)
def run_site():
threading.Thread(target=run_server).start()
threading.Thread(target=run_browser).start()
if __name__ == '__main__':
run_site()
``` |
{
"source": "joshloyal/Vary",
"score": 2
} |
#### File: vary/flows/base.py
```python
import abc
import six
import tensorflow as tf
from vary import ops
from vary import tensor_utils
@six.add_metaclass(abc.ABCMeta)
class _Flow(object):
"""Single iteration of a normalizing flow"""
def __init__(self, name, n_latent_dim, random_state=123):
self.name = name
self.random_state = random_state
self.n_latent_dim = n_latent_dim
        self._built = False
self.build()
def build(self):
self._built = True
@abc.abstractmethod
def transform(self, z_sample, features=None):
pass
def log_det_jacobian(self, z_sample):
"""Optional calculation of the log(det(Jacobian)) of the transformation
from purely the samples and inputs."""
raise NotImplementedError('`log_det_jacobian` is not implemented '
'for %s' % self.__class__.__name__)
class _VolumePreservingFlow(_Flow):
"""Volume preserving flow."""
def log_det_jacobian(self, z_sample):
batch_size = tf.shape(z_sample)[0]
return tf.zeros([batch_size])
@six.add_metaclass(abc.ABCMeta)
class NormalizingFlow(object):
def __init__(self, name, n_iter=2, random_state=123):
self.name = name
self.n_iter = n_iter
self.random_state = random_state
self._built = False
@abc.abstractproperty
def flow_class(self):
pass
def build(self, n_latent_dim):
if not self._built:
self._flows = [self.flow_class(self.name + '_%i' % i,
n_latent_dim,
random_state=self.random_state)
for i in range(self.n_iter)]
self._built = True
def transform(self, z_sample, features=None):
"""
Returns
-------
q_z_k : tensor of shape [batch_size, n_latent_dim]
A sample from the posterior of the distribution obtained
by applying the householder transformation.
"""
with tf.variable_scope(self.name + '_transform',
[z_sample]):
if features is not None:
features = tensor_utils.to_tensor(features, dtype=tf.float32)
z_sample = tensor_utils.to_tensor(z_sample, dtype=tf.float32)
n_latent_dim = tensor_utils.get_shape(z_sample)[1]
self.build(n_latent_dim)
log_det_jacobians = []
for flow in self._flows:
z_sample, log_det_jac = flow.transform(z_sample, features=features)
log_det_jacobians.append(log_det_jac)
return z_sample, ops.flatten(tf.add_n(log_det_jacobians))
```
#### File: vary/flows/identity.py
```python
from vary.flows.registry import RegisterFlow
from vary.flows.base import NormalizingFlow
from vary.flows.base import _VolumePreservingFlow
class _IdentityFlow(_VolumePreservingFlow):
def transform(self, z_sample, features=None):
return z_sample, self.log_det_jacobian(z_sample)
@RegisterFlow('identity')
class IdentityFlow(NormalizingFlow):
"""No-op for consistency."""
def __init__(self, n_iter=2, random_state=123):
super(IdentityFlow, self).__init__(
name='identity_flow',
n_iter=n_iter,
random_state=random_state)
@property
def flow_class(self):
return _IdentityFlow
```
#### File: vary/flows/planar.py
```python
import tensorflow as tf
from tensorflow.contrib import slim
from vary.flows.registry import RegisterFlow
from vary.flows.base import NormalizingFlow
from vary.flows.base import _Flow
from vary import tensor_utils as tensor_utils
from vary import ops
class _PlanarFlow(_Flow):
def build(self):
"""Calculate a vector u_hat that ensure invertibility (appendix A.1)"""
with tf.variable_scope(self.name + '_build'):
self.planar_U = tf.get_variable('planar_U',
shape=[self.n_latent_dim, 1],
initializer=None,
dtype=tf.float32,
trainable=True)
self.planar_W = tf.get_variable('planar_W',
shape=[self.n_latent_dim, 1],
initializer=None,
dtype=tf.float32,
trainable=True)
self.planar_bias = tf.get_variable('planar_bias',
shape=[1],
initializer=tf.zeros_initializer(),
dtype=tf.float32,
trainable=True)
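            # Constrain u so the flow stays invertible (appendix A.1):
            #   u_hat = u + (m(w^T u) - w^T u) * w / ||w||^2, with
            #   m(x) = -1 + softplus(x), which guarantees w^T u_hat >= -1.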
UW = ops.vector_dot(self.planar_U, self.planar_W)
            mUW = -1 + tf.nn.softplus(UW) # -1 + log(1 + exp(uw))
norm_W = self.planar_W / tf.reduce_sum(self.planar_W ** 2)
self.U_hat = self.planar_U + (mUW - UW) * norm_W # [n_latent_dim, 1]
super(_PlanarFlow, self).build()
def transform(self, z_sample, features=None):
with tf.variable_scope(self.name + '_transform', [z_sample]):
# the forward transformation (Eq. 8)
z_hat = tf.nn.xw_plus_b(z_sample, self.planar_W, self.planar_bias)
z_trans = z_sample + ops.as_row(self.U_hat) * tf.nn.tanh(z_hat)
## psi = h'(w^T * z + b) * w
## where h' = tanh' = 1 - tanh**2
psi = tf.matmul(1 - tf.nn.tanh(z_hat) ** 2, ops.as_row(self.planar_W))
## log_det_jac = log(|1 + u_hat^T * psi|)
log_det_jac = tf.log(tf.abs(1 + tf.matmul(psi, self.U_hat)))
return z_trans, log_det_jac
@RegisterFlow('planar')
class PlanarFlow(NormalizingFlow):
def __init__(self, n_iter=2, random_state=123):
super(PlanarFlow, self).__init__(
name='planar_flow',
n_iter=n_iter,
random_state=random_state)
@property
def flow_class(self):
return _PlanarFlow
#@RegisterFlow('inverse_autoregressive')
#class InverseAutoRegressiveFlow(NormalizingFlow):
# pass
```
#### File: vary/variational_autoencoder/vae.py
```python
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as layers
import tensorflow.contrib.distributions as distributions
from tensorflow.contrib.bayesflow import stochastic_tensor as st
from vary.base import BaseTensorFlowModel
from vary import flows as flow_lib
from vary import tensor_utils as tensor_utils
from vary import ops
def variational_autoencoder(features,
n_latent_dim=2,
hidden_units=[500, 500],
normalizing_flow='identity',
flow_n_iter=2,
kl_weight=1.0,
random_state=123):
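    # Builds the VAE graph: a Gaussian inference network q(z|x), a latent
    # sample optionally pushed through a normalizing flow, a Bernoulli
    # generative network p(x|z), and the negative ELBO used as the loss.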
features = tensor_utils.to_tensor(features, dtype=tf.float32)
kl_weight = tensor_utils.to_tensor(kl_weight, dtype=tf.float32)
n_features = tensor_utils.get_shape(features)[1]
with tf.variable_scope('inference_network'):
q_mu, q_sigma = ops.gaussian_inference_network(x=features,
n_latent_dim=n_latent_dim,
hidden_units=hidden_units)
#q_mu, q_chol = ops.mvn_inference_network(x=features,
# n_latent_dim=n_latent_dim,
# hidden_units=hidden_units)
# set up the latent variables
with tf.variable_scope('latent_samples'):
with st.value_type(st.SampleValue()):
q_z = st.StochasticTensor(
dist=distributions.Normal(mu=q_mu, sigma=q_sigma),
name='q_z')
#q_z = st.StochasticTensor(
# dist=distributions.MultivariateNormalCholesky(
# mu=q_mu, chol=q_chol),
# name='q_z')
# transform the sample to a more complex density by performing
# a normalizing flow transformation
norm_flow = flow_lib.get_flow(normalizing_flow,
n_iter=flow_n_iter,
random_state=random_state)
q_z_trans, log_det_jac = norm_flow.transform(q_z, features=features)
# set up the priors
with tf.variable_scope('prior'):
prior = distributions.Normal(
mu=np.zeros(n_latent_dim, dtype=np.float32),
sigma=np.ones(n_latent_dim, dtype=np.float32))
with tf.variable_scope('generative_network'):
p_x_given_z = ops.bernoulli_generative_network(
z=q_z_trans,
hidden_units=hidden_units,
n_features=n_features)
# set up elbo
log_likelihood = tf.reduce_sum(p_x_given_z.log_pmf(features), 1)
kl = tf.reduce_sum(distributions.kl(q_z.distribution, prior), 1)
neg_elbo = -tf.reduce_mean(log_likelihood + log_det_jac - kl_weight * kl, 0)
return q_mu, tf.identity(neg_elbo, name='neg_elbo')
def vae_model(train_fn,
n_latent_dim=2,
hidden_units=[500, 500],
normalizing_flow='identity',
flow_n_iter=2):
def model_spec(features, labels=None):
q_mu, neg_elbo = variational_autoencoder(
features, n_latent_dim, hidden_units,
normalizing_flow, flow_n_iter)
train_op = train_fn(neg_elbo)
return q_mu, neg_elbo, train_op
return model_spec
class GaussianVAE(BaseTensorFlowModel):
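    # Estimator wrapper around the variational_autoencoder graph above;
    # `normalizing_flow` selects the posterior flow ('identity', 'planar', ...)
    # applied to the latent samples before decoding.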
def __init__(self,
n_latent_dim=2,
hidden_units=[500, 500],
normalizing_flow='identity',
flow_n_iter=2,
kl_weight=1.0,
n_iter=10,
learning_rate=1e-3,
optimizer='Adam',
batch_size=32,
n_jobs=1,
random_state=123):
self.n_latent_dim = n_latent_dim
self.hidden_units = hidden_units
self.normalizing_flow = normalizing_flow
self.flow_n_iter = flow_n_iter
self.kl_weight = kl_weight
super(GaussianVAE, self).__init__(
n_iter=n_iter,
learning_rate=learning_rate,
optimizer=optimizer,
batch_size=batch_size,
n_jobs=n_jobs,
random_state=random_state)
def _model_spec(self):
return vae_model(
self._train_op,
n_latent_dim=self.n_latent_dim,
hidden_units=self.hidden_units,
normalizing_flow=self.normalizing_flow,
flow_n_iter=self.flow_n_iter)
``` |
{
"source": "joshloyal/YouAreNotMySupervisor",
"score": 3
} |
#### File: YouAreNotMySupervisor/examples/plot.py
```python
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn.manifold as manifold
from sklearn.decomposition import PCA, TruncatedSVD, RandomizedPCA
from pandas.tools.plotting import parallel_coordinates
from bokeh_plots import scatter_with_hover
from bokeh_server import bokeh_server
sns.set_context('poster')
sns.set_color_codes()
plot_kwargs = {'alpha': 0.25, 's': 50, 'linewidth': 0}
color_palette = sns.color_palette('deep', 8)
algorithm_class_dict = {
'mds': manifold.MDS,
'tsne': manifold.TSNE,
'pca': PCA,
}
algorithm_kwargs_dict = {
'mds': dict(n_components=2, max_iter=100, n_init=1, random_state=0),
'tsne': dict(n_components=2, init='pca', random_state=0),
'pca': dict(n_components=2)
}
def plot_2d(data, labels=None, probabilities=None, algorithm='tsne', algorithm_kwargs=None):
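    # Static 2-D scatter of the data: inputs with more than two columns are
    # first embedded with MDS/t-SNE/PCA; points are coloured by cluster label
    # and desaturated by cluster membership probability when available.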
if data.shape[1] > 2:
algorithm_class = algorithm_class_dict[algorithm]
if algorithm_kwargs:
algorithm = algorithm_class(**algorithm_kwargs)
else:
algorithm = algorithm_class(**algorithm_kwargs_dict[algorithm])
Y = algorithm.fit_transform(data)
else:
Y = data
color_palette = sns.color_palette('deep', len(np.unique(labels)))
if labels is not None:
cluster_colors = [color_palette[x] if x >= 0 else
(0.5, 0.5, 0.5) for
x in labels]
        if probabilities is not None and np.all(np.isfinite(probabilities)):
cluster_member_colors = [sns.desaturate(x, p) for x, p in
zip(cluster_colors, probabilities)]
else:
cluster_member_colors = cluster_colors
else:
cluster_member_colors = 'b'
plt.scatter(Y[:, 0], Y[:, 1], c=cluster_member_colors, **plot_kwargs)
frame = plt.gca()
frame.get_xaxis().set_visible(False)
frame.get_yaxis().set_visible(False)
plt.show()
def bokeh_plot_2d(data, labels=None, probabilities=None, algorithm='tsne', algorithm_kwargs=None, untransformed_data=None):
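    # Interactive bokeh version of plot_2d: very wide inputs are first reduced
    # to 32 components with RandomizedPCA before the 2-D embedding, and the
    # original columns are exposed through hover tooltips.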
if data.shape[1] > 2:
if data.shape[1] > 32 and algorithm != 'pca':
data = RandomizedPCA(n_components=32).fit_transform(data)
algorithm_class = algorithm_class_dict[algorithm]
if algorithm_kwargs:
algorithm = algorithm_class(**algorithm_kwargs)
else:
algorithm = algorithm_class(**algorithm_kwargs_dict[algorithm])
Y = algorithm.fit_transform(data)
else:
Y = data
color_palette = sns.color_palette('deep', len(np.unique(labels)))
if labels is not None:
cluster_colors = [color_palette[x] if x >= 0 else
(0.5, 0.5, 0.5) for
x in labels]
if probabilities is not None and np.all(np.isfinite(probabilities)):
cluster_member_colors = [sns.desaturate(x, p) for x, p in
zip(cluster_colors, probabilities)]
else:
cluster_member_colors = cluster_colors
cluster_member_colors = [mpl.colors.rgb2hex(rgb) for rgb in cluster_member_colors]
else:
cluster_member_colors = 'b'
if untransformed_data is not None:
original_columns = untransformed_data.columns.tolist()
df = untransformed_data.copy()
df['proj1'] = Y[:, 0]
df['proj2'] = Y[:, 1]
else:
original_columns = []
data_dict = {}
for column in xrange(data.shape[1]):
colname = 'x%i' % column
original_columns.append(colname)
data_dict[colname] = data[:, column]
data_dict.update({'proj1': Y[:, 0], 'proj2': Y[:, 1]})
df = pd.DataFrame(data_dict)
with bokeh_server(name='comp') as server:
q = scatter_with_hover(df, 'proj1', 'proj2', cols=original_columns, color=cluster_member_colors, alpha=0.5, size=5)
server.show(q)
def project_data(data, algorithm='tsne', algorithm_kwargs=None, n_components=2):
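    # Embeds the data into n_components dimensions with the requested
    # algorithm; data that already has n_components or fewer columns is
    # returned unchanged.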
if data.shape[1] > n_components:
algorithm_class = algorithm_class_dict[algorithm]
if algorithm_kwargs:
algorithm_kwargs['n_components'] = n_components
algorithm = algorithm_class(**algorithm_kwargs)
else:
kwargs_dict = algorithm_kwargs_dict.copy()
kwargs_dict[algorithm]['n_components'] = n_components
algorithm = algorithm_class(**kwargs_dict[algorithm])
return algorithm.fit_transform(data)
else:
return data
def plot_parallel_coordinates(data, labels, n_components=10, algorithm='tsne', algorithm_kwargs=None, show_average=False):
df = data
df['y'] = labels
if show_average:
df = df.groupby('y').mean()
df['y'] = df.index
parallel_coordinates(df[ df['y'] != -1 ], 'y')
plt.show()
def prep_for_d3(data, cluster, filename):
Y = project_data(data.values, algorithm='tsne')
data['name'] = cluster.labels_
data['name'] = data['name'].apply(lambda x: 'group_{}'.format(x))
data['group'] = cluster.labels_
data['y1'] = Y[:, 0]
data['y2'] = Y[:, 1]
data.to_csv(filename, index_label='index')
``` |
{
"source": "joshlsastro/physics_simulators",
"score": 4
} |
#### File: joshlsastro/physics_simulators/electrostatics.py
```python
import time
import turtle
# DISCLAIMER: I never included special relativity, which means no magnetism.
# If you use this to simulate electrodynamics, your answer WILL be wrong.
wn = turtle.Screen()
class BasePhysicsBody(object):
"""Base Class for Physics Body."""
def __init__(self, x_0, v_0, m, color=None, shape='turtle'):
self.turtle = turtle.Turtle()
self.turtle.shape(shape)
if color != None:
self.turtle.color(color)
self.turtle.speed(0) # As fast as possible; update() takes care of motion
self.turtle.penup()
self.position = x_0
self.velocity = v_0
self.mass = m
self.turtle.goto(x_0)
def force(self):
"""Up to user."""
return [0, 0]
def update(self, time_step):
"""Updates all attributes for one time_step."""
a = [0,0]
F = self.force()
for i in [0,1]: # We have to update x and y
            a[i] = F[i] / self.mass
self.velocity[i] = self.velocity[i] + a[i]*time_step
self.position[i] = self.position[i] + self.velocity[i]*time_step # I'm lazy
self.turtle.goto(self.position) # Comment out the goto if you need the simulation to run really fast; you won't get the animation
class VectorField(object):
"""A vector field."""
def __init__(self, distribution):
"""Initializes VectorField where distribution is a list of BasePhysicsBody objects."""
self.distribution = distribution
self.ped = turtle.Turtle()
self.ped.hideturtle()
self.ped.speed(0)
self.ped.penup()
def value(self, position):
"""Returns value of vector field at position. Returns 0 by default."""
return [0,0]
def draw(self, position):
"""Draws vector field value at position."""
# Getting value at position
vector = self.value(position)
x = vector[0]
y = vector[1]
# Using self.ped to draw vector
self.ped.goto(position[0], position[1])
self.ped.pendown()
self.ped.goto(position[0]+x, position[1]+y)
self.ped.penup()
class ElectricField(VectorField):
def set_k(self, k):
self.k = k
def distance(self, position1, position2):
"""Use Pythagorean Theorem to find distance between two positions."""
x1, y1 = position1[0], position1[1]
x2, y2 = position2[0], position2[1]
return ((x2-x1)**2 + (y2-y1)**2) ** 0.5
def rhat(self, position_body, position_place):
"""Calculates the unit vector which, when multiplied by the distance,
gets one from position_body to position_place.
This is extremely useful for calculating electric field for statics."""
dx = position_place[0] - position_body[0]
dy = position_place[1] - position_body[1]
d = self.distance(position_body, position_place)
xHat = dx / d
yHat = dy / d
return [xHat, yHat]
def _one_body_value(self, position, body):
"""Finds electric field from specified body at position."""
if 'k' not in dir(self):
raise AttributeError("No k defined!")
# Coulomb's Law
d = self.distance(body.position, position)
if d == 0:
# Bodies don't exert electric force on themselves
return [0, 0]
else:
r = self.rhat(body.position, position)
amount = (self.k * body.charge) / (d**2)
return [amount*r[0], amount*r[1]]
def value(self, position):
all_vectors = []
for body in self.distribution:
all_vectors.append(self._one_body_value(position, body))
# Adding up vectors
x = 0
y = 0
for vector in all_vectors:
x += vector[0]
y += vector[1]
return [x, y]
class PointCharge(BasePhysicsBody):
"""A Point Charge."""
def set_charge(self, charge):
"""Sets charge and stops point charge."""
self.v_0 = [0,0]
self.charge = charge
def force(self):
return [0,0]
def draw_field_grid(vectorField, separation):
"""Draw vectorField with a grid separated by separation."""
global wn
xTotal, yTotal = wn.screensize()
for x in range(int(-xTotal/2), int(xTotal/2), separation):
for y in range(int(-yTotal/2), int(yTotal/2), separation):
vectorField.draw([x, y])
# <For User>
# Classes of VectorFields used
# Classes of physics bodies with Forces
# Define bodies here
plus = PointCharge([0,30], [0,0], 0, "red")
plus.set_charge(100)
minus = PointCharge([0,-30], [0,0], 0, "blue")
minus.set_charge(-100)
all_bodies = [plus, minus] # Place all bodies here
k = 100 # Coulomb's Constant
resolution = 20 # Resolution of grid
# </For User>
# Running Computation
# Setting up variables
E = ElectricField(all_bodies)
E.set_k(k)
draw_field_grid(E, resolution)
wn.mainloop()
``` |
{
"source": "Joshlucpoll/battleshipsGame",
"score": 4
} |
#### File: Joshlucpoll/battleshipsGame/assets.py
```python
import config
import main, board, game
import sys
import time
import random
import tkinter as tk
from tkinter import *
import tkinter
global shipPlaceChooser
class DisplayShip():
def __init__(self, position, ship, rotation):
#define variables
self.position = position
self.ship = ship
self.rotation = rotation
self.ships = config.ships
#gets the row and column of the button pressed
buttonInfo = config.PcoordinateButtons[position].grid_info()
self.buttonColumn = buttonInfo['column']
self.buttonRow = buttonInfo['row']
        #creates variables according to the ship type - self.length is the number of tiles it takes up and self.shipCode is used to identify the type of ship
if self.ship == "carrier":
self.shipCode = 0
self.length = 5
if self.ship == "battleship":
self.shipCode = 1
self.length = 4
if self.ship == "cruiser":
self.shipCode = 2
self.length = 3
if self.ship == "destroyer":
self.shipCode = 3
self.length = 2
if self.ship == "submarine":
self.shipCode = 4
self.length = 2
self.renderShips()
def shipPlacementChecker(self):
        #creates list for the positions the ship will be rendered in
selectedPositions = []
#adds position to 'selectedPosition'
for i in range(self.length):
if self.rotation == "right":
selectedPositions.append(self.position+i)
if self.rotation == "down":
selectedPositions.append(self.position+(i*11))
if self.rotation == "left":
selectedPositions.append(self.position-i)
if self.rotation == "up":
selectedPositions.append(self.position-(i*11))
#compares 'selectedPosition' to each list in 'shipsPositions' and sees if there are any matches
        #if it finds a match it returns 'False'; if not, 'True' is returned
for i in range(5):
check = any(item in selectedPositions for item in config.shipsPositions[i])
if check == True:
return False
return True
def renderShips(self):
        #this if statement checks whether any of the positions for the ship to be rendered are occupied by another ship
if self.shipPlacementChecker() == True:
if self.rotation == "up":
#only allows you to place the ship in the playing grid
if self.buttonRow > (self.length - 1) and self.buttonColumn != 0 and self.buttonRow !=0:
#displays the ship tiles and adds the positions to 'shipsPositions'
for i in range(self.length):
config.PcoordinateButtons[self.position-(i*11)].config(image="")
config.PcoordinateButtons[self.position-(i*11)].config(image=self.ships[self.shipCode][3][i])
config.shipsPositions[self.shipCode].append(self.position-(i*11))
self.correctPlacement = True
else:
self.correctPlacement = False
if self.rotation == "right":
#only allows you to place the ship in the playing grid
if self.buttonColumn < (12 - self.length) and self.buttonColumn != 0 and self.buttonRow != 0:
#displays the ship tiles and adds the positions to 'shipsPositions'
for i in range(self.length):
config.PcoordinateButtons[self.position+(i)].config(image="")
config.PcoordinateButtons[self.position+(i)].config(image=self.ships[self.shipCode][0][i])
config.shipsPositions[self.shipCode].append(self.position+i)
self.correctPlacement = True
else:
self.correctPlacement = False
if self.rotation == "down":
#only allows you to place the ship in the playing grid
if self.buttonRow < (12 - self.length) and self.buttonRow != 0 and self.buttonColumn != 0:
#displays the ship tiles and adds the positions to 'shipsPositions'
for i in range(self.length):
config.PcoordinateButtons[self.position+(i*11)].config(image="")
config.PcoordinateButtons[self.position+(i*11)].config(image=self.ships[self.shipCode][1][i])
config.shipsPositions[self.shipCode].append(self.position+(i*11))
self.correctPlacement = True
else:
self.correctPlacement = False
if self.rotation == "left":
#only allows you to place the ship in the playing grid
if self.buttonColumn > (self.length - 1) and self.buttonColumn != 0 and self.buttonRow != 0:
#displays the ship tiles and adds the positions to 'shipsPositions'
for i in range(self.length):
config.PcoordinateButtons[self.position-(i)].config(image="")
config.PcoordinateButtons[self.position-(i)].config(image=self.ships[self.shipCode][2][i])
config.shipsPositions[self.shipCode].append(self.position-i)
self.correctPlacement = True
else:
self.correctPlacement = False
else:
self.correctPlacement = False
def assetsInitialisation():
#Loads in all necessary assets
config.arrowRight = PhotoImage(file="./assets/arrow/arrow0.gif")
config.arrowDown = PhotoImage(file="./assets/arrow/arrow1.gif")
config.arrowLeft = PhotoImage(file="./assets/arrow/arrow2.gif")
config.arrowUp = PhotoImage(file="./assets/arrow/arrow3.gif")
def filePathFormater(ship, number):
#creates variables for use in the filepath
for i in range(4):
if i == 0:
r = "right"
if i == 1:
r = "down"
if i == 2:
r = "left"
if i == 3:
r = "up"
#fetches and stores all ship tile photos with their 4 orientations in the 'ships' list
if ship == 0:
filePath = ("./assets/submarine/" + r + "/submarine" + str(number) + ".gif")
image = PhotoImage(file=filePath)
config.ships[4][i].append(image)
if ship == 1:
filePath = ("./assets/destroyer/" + r + "/destroyer" + str(number) + ".gif")
image = PhotoImage(file=filePath)
config.ships[3][i].append(image)
if ship == 2:
filePath = ("./assets/cruiser/" + r + "/cruiser" + str(number) + ".gif")
image = PhotoImage(file=filePath)
config.ships[2][i].append(image)
if ship == 3:
filePath = ("./assets/battleship/" + r + "/battleship" + str(number) + ".gif")
image = PhotoImage(file=filePath)
config.ships[1][i].append(image)
if ship == 4:
filePath = ("./assets/aircraftCarrier/" + r + "/aircraftCarrier" + str(number) + ".gif")
image = PhotoImage(file=filePath)
config.ships[0][i].append(image)
#ships = ["carrier", "battleship", "cruiser", "destroyer", "submarine"]
#Where the images of the ships are stored
config.ships = [[[], [], [], []], [[], [], [], []], [[], [], [], []], [[], [], [], []], [[], [], [], []]]
#Where the position of the ships are stored
config.shipsPositions = [[], [], [], [], []]
#i-1 = ship number
#b = image number
    #This loop runs through each image number b times (the number of images it has / the number of squares it takes up)
    #and forwards the numbers to the filePathFormater function
for i in range(6):
for b in range(i):
#As the submarine is the first ship but has two image files it has to re-loop and increment the ship number
if i == 1:
for x in range(2):
b = b+x
filePathFormater(i-1, b)
else:
filePathFormater(i-1, b)
placeShips()
def placeShips():
global shipPlaceChooser
    #variable initialised so the program knows which ship was placed last and therefore which ship needs to be placed next
config.numOfShipsPlaced = 0
#initiates list where labels that prompt the user to place the ships are stored
config.shipPlacePrompt = []
#labels are created to prompt user
carrierPrompt = Label(config.commandWin, text="ADMIRAL, PLACE YOUR\nAIRCRAFT CARRIER\n5 LONG", bg="grey", fg="white", height=5, width=30, font=("Courier"))
battleshipPrompt = Label(config.commandWin, text="ADMIRAL, PLACE YOUR\nBATTLESHIP\n4 LONG", bg="grey", fg="white", height=5, width=30, font=("Courier"))
cruiserPrompt = Label(config.commandWin, text="ADMIRAL, PLACE YOUR\nCRUISER\n3 LONG", bg="grey", fg="white", height=5, width=30, font=("Courier"))
destroyerPrompt = Label(config.commandWin, text="ADMIRAL, PLACE YOUR\nDESTROYER\n2 LONG", bg="grey", fg="white", height=5, width=30, font=("Courier"))
submarinePrompt = Label(config.commandWin, text="ADMIRAL, PLACE YOUR\nSUBMARINE\n2 LONG", bg="grey", fg="white", height=5, width=30, font=("Courier"))
#labels are added to the list
config.shipPlacePrompt.append(carrierPrompt)
config.shipPlacePrompt.append(battleshipPrompt)
config.shipPlacePrompt.append(cruiserPrompt)
config.shipPlacePrompt.append(destroyerPrompt)
config.shipPlacePrompt.append(submarinePrompt)
#first prompt is shown
carrierPrompt.pack()
def placeCarrier(position, rotation):
#creates the object 'carrier' for the class 'DisplayShip'
carrier = DisplayShip(position, "carrier", rotation)
        #if the ship was correctly positioned, 'config.numOfShipsPlaced' is incremented and the prompt is refreshed for the new ship
if carrier.correctPlacement == True:
config.numOfShipsPlaced = 1
config.shipPlacePrompt[0].destroy()
config.shipPlacePrompt[1].pack()
def placeBattleship(position, rotation):
#creates the object 'battleship' for the class 'DisplayShip'
battleship = DisplayShip(position, "battleship", rotation)
        #if the ship was correctly positioned, 'config.numOfShipsPlaced' is incremented and the prompt is refreshed for the new ship
if battleship.correctPlacement == True:
config.numOfShipsPlaced = 2
config.shipPlacePrompt[1].destroy()
config.shipPlacePrompt[2].pack()
def placeCruiser(position, rotation):
#creates the object 'cruiser' for the class 'DisplayShip'
cruiser = DisplayShip(position, "cruiser", rotation)
        #if the ship was correctly positioned, 'config.numOfShipsPlaced' is incremented and the prompt is refreshed for the new ship
if cruiser.correctPlacement == True:
config.numOfShipsPlaced = 3
config.shipPlacePrompt[2].destroy()
config.shipPlacePrompt[3].pack()
def placeDestroyer(position, rotation):
#creates the object 'destroyer' for the class 'DisplayShip'
destroyer = DisplayShip(position, "destroyer", rotation)
        #if the ship was correctly positioned, 'config.numOfShipsPlaced' is incremented and the prompt is refreshed for the new ship
if destroyer.correctPlacement == True:
config.numOfShipsPlaced = 4
config.shipPlacePrompt[3].destroy()
config.shipPlacePrompt[4].pack()
def placeSubmarine(position, rotation):
#creates the object 'submarine' for the class 'DisplayShip'
submarine = DisplayShip(position, "submarine", rotation)
        #if the ship was correctly positioned, 'config.numOfShipsPlaced' is incremented, the final prompt is removed and the game begins
if submarine.correctPlacement == True:
config.numOfShipsPlaced = 5
config.shipPlacePrompt[4].destroy()
config.gameStartUpComplete = True
game.gamePlaying()
def shipPlaceChooser(position, rotation):
#decides which ship needs to be rendered next and calls the appropriate function
if config.numOfShipsPlaced == 0:
placeCarrier(position, rotation)
if config.numOfShipsPlaced == 1:
placeBattleship(position, rotation)
if config.numOfShipsPlaced == 2:
placeCruiser(position, rotation)
if config.numOfShipsPlaced == 3:
placeDestroyer(position, rotation)
if config.numOfShipsPlaced == 4:
placeSubmarine(position, rotation)
loopCondition = False
numOfAiShips = 0
config.AiShipsPositions = [[], [], [], [], []]
placeAiShips(loopCondition, numOfAiShips)
def placeAiShips(loopCondition, numOfAiShips):
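    #randomly places the five AI ships by recursion: a random position and
    #rotation are drawn and kept only if the ship fits on the grid and does
    #not overlap a previously placed AI ship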
def AiShipPlacementChecker(rotation, position, length):
#global config.AiShipsPositions
        #creates list for the positions the ship will be rendered in
selectedPositions = []
#adds position to 'selectedPosition'
for i in range(length):
if rotation == 0: #(up)
selectedPositions.append(position-(i*11))
if rotation == 1: #(right)
selectedPositions.append(position+i)
if rotation == 2: #(down)
selectedPositions.append(position+(i*11))
if rotation == 3: #(left)
selectedPositions.append(position-i)
#compares 'selectedPosition' to each list in 'shipsPositions' and sees if there are any matches
        #if it finds a match it returns 'False'; if not, 'True' is returned
for i in range(5):
check = any(item in selectedPositions for item in config.AiShipsPositions[i])
if check == True:
return False
return True
#this acts as a while loop until 5 ships have been placed by the AI
if loopCondition == False:
#assigns the right length to the ship
if numOfAiShips == 0: #carrier
length = 5
if numOfAiShips == 1: #battleship
length = 4
if numOfAiShips == 2: #cruiser
length = 3
if numOfAiShips == 3: #destroyer
length = 2
if numOfAiShips == 4: #submarine
length = 2
#creates a random position and rotation
ranNum = random.randint(1, 121)
ranRotation = random.randint(0,3)
#gets the row and column of the button pressed
buttonInfo = config.EcoordinateButtons[ranNum].grid_info()
buttonColumn = buttonInfo['column']
buttonRow = buttonInfo['row']
        #this if statement checks whether any of the positions for the ship to be placed are occupied by another ship
if AiShipPlacementChecker(ranRotation, ranNum, length) == True:
#Checks to see if the random position is in the playing area
if buttonColumn != 0 and buttonRow != 0:
if ranRotation == 0: #(up)
#checks to see if the ships position would fit where it has been placed
if buttonRow > (length - 1) and buttonColumn != 0 and buttonRow !=0:
for i in range(length):
config.AiShipsPositions[numOfAiShips].append(ranNum-(i*11))
numOfAiShips += 1
if ranRotation == 1: #(right)
#checks to see if the ships position would fit where it has been placed
if buttonColumn < (12 - length) and buttonColumn != 0 and buttonRow !=0:
for i in range(length):
config.AiShipsPositions[numOfAiShips].append(ranNum+i)
numOfAiShips += 1
if ranRotation == 2: #(down)
#checks to see if the ships position would fit where it has been placed
if buttonRow < (12 - length) and buttonColumn != 0 and buttonRow !=0:
for i in range(length):
config.AiShipsPositions[numOfAiShips].append(ranNum+(i*11))
numOfAiShips += 1
if ranRotation == 3: #(left)
#checks to see if the ships position would fit where it has been placed
if buttonColumn > (length - 1) and buttonColumn != 0 and buttonRow !=0:
for i in range(length):
config.AiShipsPositions[numOfAiShips].append(ranNum-i)
numOfAiShips += 1
        #checks to see if 5 ships have been placed; if so, the recursion stops, otherwise the function is called again
if numOfAiShips < 5:
placeAiShips(loopCondition, numOfAiShips)
else:
loopCondition = True
``` |
{
"source": "joshluongo/sentry-discord",
"score": 2
} |
#### File: src/sentry_discord/plugin.py
```python
from __future__ import absolute_import
import logging
import sentry_discord
from django import forms
from django.utils.html import escape
from requests import HTTPError
from sentry import http
from sentry.plugins.bases import notify
from sentry.utils import json
LEVEL_TO_COLOR = {
"debug": "cfd3da",
"info": "2788ce",
"warning": "f18500",
"error": "f43f20",
"fatal": "d20f2a",
}
class DiscordOptionsForm(notify.NotificationConfigurationForm):
webhook_url = forms.CharField(
max_length=255,
label='Discord Webhook URL',
widget=forms.TextInput(
attrs={'class': 'span6', 'placeholder': 'e.g. https://discordapp.com/api/webhooks/x/x'}),
help_text='You can get this under the channel options in Discord.',
required=True,
)
class DiscordPlugin(notify.NotificationPlugin):
author = '<NAME>'
author_url = 'https://jrapps.com.au'
resource_links = (
('Bug Tracker', 'https://github.com/joshluongo/sentry-discord/issues'),
('Source', 'https://github.com/joshluongo/sentry-discord'),
)
title = 'Discord'
slug = 'discord'
description = 'Create Discord alerts out of notifications.'
conf_key = 'discord'
version = sentry_discord.VERSION
project_conf_form = DiscordOptionsForm
logger = logging.getLogger('sentry.plugins.discord')
def is_configured(self, project):
return bool(self.get_option("webhook_url", project))
def get_form_initial(self, project=None):
return {
'webhook_url': 'https://discordapp.com/api/webhooks/x/x',
}
def color_for_event(self, event):
return "#" + LEVEL_TO_COLOR.get(event.get_tag("level"), "error")
def notify(self, notification):
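        # Build a Slack-style attachment payload and post it to the Discord
        # webhook's /slack compatibility endpoint.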
event = notification.event
group = event.group
project = group.project
if not self.is_configured(project):
return
webhook = self.get_option("webhook_url", project)
        # Make it a Slack-compatible webhook because I'm lazy.
if not webhook.endswith("/slack"):
webhook += "/slack"
username = "Sentry"
icon_url = ""
channel = ""
title = event.title.encode("utf-8")
# TODO(dcramer): we'd like this to be the event culprit, but Sentry
# does not currently retain it
if group.culprit:
culprit = group.culprit.encode("utf-8")
else:
culprit = None
project_name = project.get_full_name().encode("utf-8")
fields = []
# They can be the same if there is no culprit
# So we set culprit to an empty string instead of duplicating the text
fields.append({"title": "Culprit", "value": culprit, "short": False})
fields.append({"title": "Project", "value": project_name, "short": True})
payload = {
"attachments": [
{
"fallback": "[%s] %s" % (project_name, title),
"title": title,
"title_link": group.get_absolute_url(params={"referrer": "slack"}),
"color": self.color_for_event(event),
"fields": fields,
}
]
}
if username:
payload["username"] = username.encode("utf-8")
if channel:
payload["channel"] = channel
if icon_url:
payload["icon_url"] = icon_url
values = {"payload": json.dumps(payload)}
# Apparently we've stored some bad data from before we used `URLField`.
webhook = webhook.strip(" ")
return http.safe_urlopen(webhook, method="POST", data=values, timeout=5)
``` |
{
"source": "joshluster/Home-Assistant",
"score": 2
} |
#### File: components/cloud/test_http_api.py
```python
import asyncio
from unittest.mock import patch, MagicMock
import pytest
from jose import jwt
from homeassistant.bootstrap import async_setup_component
from homeassistant.components.cloud import DOMAIN, auth_api, iot
from tests.common import mock_coro
@pytest.fixture
def cloud_client(hass, test_client):
"""Fixture that can fetch from the cloud client."""
with patch('homeassistant.components.cloud.Cloud.async_start',
return_value=mock_coro()):
hass.loop.run_until_complete(async_setup_component(hass, 'cloud', {
'cloud': {
'mode': 'development',
'cognito_client_id': 'cognito_client_id',
'user_pool_id': 'user_pool_id',
'region': 'region',
'relayer': 'relayer',
}
}))
hass.data['cloud']._decode_claims = \
lambda token: jwt.get_unverified_claims(token)
with patch('homeassistant.components.cloud.Cloud.write_user_info'):
yield hass.loop.run_until_complete(test_client(hass.http.app))
@pytest.fixture
def mock_cognito():
"""Mock warrant."""
with patch('homeassistant.components.cloud.auth_api._cognito') as mock_cog:
yield mock_cog()
@asyncio.coroutine
def test_account_view_no_account(cloud_client):
"""Test fetching account if no account available."""
req = yield from cloud_client.get('/api/cloud/account')
assert req.status == 400
@asyncio.coroutine
def test_account_view(hass, cloud_client):
"""Test fetching account if no account available."""
hass.data[DOMAIN].id_token = jwt.encode({
'email': '<EMAIL>',
'custom:sub-exp': '2018-01-03'
}, 'test')
hass.data[DOMAIN].iot.state = iot.STATE_CONNECTED
req = yield from cloud_client.get('/api/cloud/account')
assert req.status == 200
result = yield from req.json()
assert result == {
'email': '<EMAIL>',
'sub_exp': '2018-01-03',
'cloud': iot.STATE_CONNECTED,
}
@asyncio.coroutine
def test_login_view(hass, cloud_client, mock_cognito):
"""Test logging in."""
mock_cognito.id_token = jwt.encode({
'email': '<EMAIL>',
'custom:sub-exp': '2018-01-03'
}, 'test')
mock_cognito.access_token = 'access_token'
mock_cognito.refresh_token = '<PASSWORD>_token'
with patch('homeassistant.components.cloud.iot.CloudIoT.'
'connect') as mock_connect, \
patch('homeassistant.components.cloud.auth_api._authenticate',
return_value=mock_cognito) as mock_auth:
req = yield from cloud_client.post('/api/cloud/login', json={
'email': 'my_username',
'password': '<PASSWORD>'
})
assert req.status == 200
result = yield from req.json()
assert result['email'] == '<EMAIL>'
assert result['sub_exp'] == '2018-01-03'
assert len(mock_connect.mock_calls) == 1
assert len(mock_auth.mock_calls) == 1
cloud, result_user, result_pass = mock_auth.mock_calls[0][1]
assert result_user == 'my_username'
assert result_pass == '<PASSWORD>'
@asyncio.coroutine
def test_login_view_invalid_json(cloud_client):
"""Try logging in with invalid JSON."""
with patch('homeassistant.components.cloud.auth_api.login') as mock_login:
req = yield from cloud_client.post('/api/cloud/login', data='Not JSON')
assert req.status == 400
assert len(mock_login.mock_calls) == 0
@asyncio.coroutine
def test_login_view_invalid_schema(cloud_client):
"""Try logging in with invalid schema."""
with patch('homeassistant.components.cloud.auth_api.login') as mock_login:
req = yield from cloud_client.post('/api/cloud/login', json={
'invalid': 'schema'
})
assert req.status == 400
assert len(mock_login.mock_calls) == 0
@asyncio.coroutine
def test_login_view_request_timeout(cloud_client):
"""Test request timeout while trying to log in."""
with patch('homeassistant.components.cloud.auth_api.login',
side_effect=asyncio.TimeoutError):
req = yield from cloud_client.post('/api/cloud/login', json={
'email': 'my_username',
'password': '<PASSWORD>'
})
assert req.status == 502
@asyncio.coroutine
def test_login_view_invalid_credentials(cloud_client):
"""Test logging in with invalid credentials."""
with patch('homeassistant.components.cloud.auth_api.login',
side_effect=auth_api.Unauthenticated):
req = yield from cloud_client.post('/api/cloud/login', json={
'email': 'my_username',
'password': '<PASSWORD>'
})
assert req.status == 401
@asyncio.coroutine
def test_login_view_unknown_error(cloud_client):
"""Test unknown error while logging in."""
with patch('homeassistant.components.cloud.auth_api.login',
side_effect=auth_api.UnknownError):
req = yield from cloud_client.post('/api/cloud/login', json={
'email': 'my_username',
'password': '<PASSWORD>'
})
assert req.status == 502
@asyncio.coroutine
def test_logout_view(hass, cloud_client):
"""Test logging out."""
cloud = hass.data['cloud'] = MagicMock()
cloud.logout.return_value = mock_coro()
req = yield from cloud_client.post('/api/cloud/logout')
assert req.status == 200
data = yield from req.json()
assert data == {'message': 'ok'}
assert len(cloud.logout.mock_calls) == 1
@asyncio.coroutine
def test_logout_view_request_timeout(hass, cloud_client):
"""Test timeout while logging out."""
cloud = hass.data['cloud'] = MagicMock()
cloud.logout.side_effect = asyncio.TimeoutError
req = yield from cloud_client.post('/api/cloud/logout')
assert req.status == 502
@asyncio.coroutine
def test_logout_view_unknown_error(hass, cloud_client):
"""Test unknown error while logging out."""
cloud = hass.data['cloud'] = MagicMock()
cloud.logout.side_effect = auth_api.UnknownError
req = yield from cloud_client.post('/api/cloud/logout')
assert req.status == 502
@asyncio.coroutine
def test_register_view(mock_cognito, cloud_client):
"""Test logging out."""
req = yield from cloud_client.post('/api/cloud/register', json={
'email': '<EMAIL>',
'password': '<PASSWORD>'
})
assert req.status == 200
assert len(mock_cognito.register.mock_calls) == 1
result_email, result_pass = mock_cognito.register.mock_calls[0][1]
assert result_email == '<EMAIL>'
assert result_pass == '<PASSWORD>'
@asyncio.coroutine
def test_register_view_bad_data(mock_cognito, cloud_client):
"""Test logging out."""
req = yield from cloud_client.post('/api/cloud/register', json={
'email': '<EMAIL>',
'not_password': '<PASSWORD>'
})
assert req.status == 400
assert len(mock_cognito.logout.mock_calls) == 0
@asyncio.coroutine
def test_register_view_request_timeout(mock_cognito, cloud_client):
"""Test timeout while logging out."""
mock_cognito.register.side_effect = asyncio.TimeoutError
req = yield from cloud_client.post('/api/cloud/register', json={
'email': '<EMAIL>',
'password': '<PASSWORD>'
})
assert req.status == 502
@asyncio.coroutine
def test_register_view_unknown_error(mock_cognito, cloud_client):
"""Test unknown error while logging out."""
mock_cognito.register.side_effect = auth_api.UnknownError
req = yield from cloud_client.post('/api/cloud/register', json={
'email': '<EMAIL>',
'password': '<PASSWORD>'
})
assert req.status == 502
@asyncio.coroutine
def test_forgot_password_view(mock_cognito, cloud_client):
"""Test logging out."""
req = yield from cloud_client.post('/api/cloud/forgot_password', json={
'email': '<EMAIL>',
})
assert req.status == 200
assert len(mock_cognito.initiate_forgot_password.mock_calls) == 1
@asyncio.coroutine
def test_forgot_password_view_bad_data(mock_cognito, cloud_client):
"""Test logging out."""
req = yield from cloud_client.post('/api/cloud/forgot_password', json={
'not_email': '<EMAIL>',
})
assert req.status == 400
assert len(mock_cognito.initiate_forgot_password.mock_calls) == 0
@asyncio.coroutine
def test_forgot_password_view_request_timeout(mock_cognito, cloud_client):
"""Test timeout while logging out."""
mock_cognito.initiate_forgot_password.side_effect = asyncio.TimeoutError
req = yield from cloud_client.post('/api/cloud/forgot_password', json={
'email': '<EMAIL>',
})
assert req.status == 502
@asyncio.coroutine
def test_forgot_password_view_unknown_error(mock_cognito, cloud_client):
"""Test unknown error while logging out."""
mock_cognito.initiate_forgot_password.side_effect = auth_api.UnknownError
req = yield from cloud_client.post('/api/cloud/forgot_password', json={
'email': '<EMAIL>',
})
assert req.status == 502
@asyncio.coroutine
def test_resend_confirm_view(mock_cognito, cloud_client):
"""Test logging out."""
req = yield from cloud_client.post('/api/cloud/resend_confirm', json={
'email': '<EMAIL>',
})
assert req.status == 200
assert len(mock_cognito.client.resend_confirmation_code.mock_calls) == 1
@asyncio.coroutine
def test_resend_confirm_view_bad_data(mock_cognito, cloud_client):
"""Test logging out."""
req = yield from cloud_client.post('/api/cloud/resend_confirm', json={
'not_email': '<EMAIL>',
})
assert req.status == 400
assert len(mock_cognito.client.resend_confirmation_code.mock_calls) == 0
@asyncio.coroutine
def test_resend_confirm_view_request_timeout(mock_cognito, cloud_client):
"""Test timeout while logging out."""
mock_cognito.client.resend_confirmation_code.side_effect = \
asyncio.TimeoutError
req = yield from cloud_client.post('/api/cloud/resend_confirm', json={
'email': '<EMAIL>',
})
assert req.status == 502
@asyncio.coroutine
def test_resend_confirm_view_unknown_error(mock_cognito, cloud_client):
"""Test unknown error while logging out."""
mock_cognito.client.resend_confirmation_code.side_effect = \
auth_api.UnknownError
req = yield from cloud_client.post('/api/cloud/resend_confirm', json={
'email': '<EMAIL>',
})
assert req.status == 502
```
#### File: components/homekit/test_accessories.py
```python
from unittest.mock import patch
# pylint: disable=unused-import
from pyhap.loader import get_serv_loader, get_char_loader # noqa F401
from homeassistant.components.homekit.accessories import (
set_accessory_info, add_preload_service, override_properties,
HomeAccessory, HomeBridge)
from homeassistant.components.homekit.const import (
SERV_ACCESSORY_INFO, SERV_BRIDGING_STATE,
CHAR_MODEL, CHAR_MANUFACTURER, CHAR_SERIAL_NUMBER)
from tests.mock.homekit import (
get_patch_paths, mock_preload_service,
MockTypeLoader, MockAccessory, MockService, MockChar)
PATH_SERV = 'pyhap.loader.get_serv_loader'
PATH_CHAR = 'pyhap.loader.get_char_loader'
PATH_ACC, _ = get_patch_paths()
@patch(PATH_CHAR, return_value=MockTypeLoader('char'))
@patch(PATH_SERV, return_value=MockTypeLoader('service'))
def test_add_preload_service(mock_serv, mock_char):
"""Test method add_preload_service.
The methods 'get_serv_loader' and 'get_char_loader' are mocked.
"""
acc = MockAccessory('Accessory')
serv = add_preload_service(acc, 'TestService',
['TestChar', 'TestChar2'],
['TestOptChar', 'TestOptChar2'])
assert serv.display_name == 'TestService'
assert len(serv.characteristics) == 2
assert len(serv.opt_characteristics) == 2
acc.services = []
serv = add_preload_service(acc, 'TestService')
assert not serv.characteristics
assert not serv.opt_characteristics
acc.services = []
serv = add_preload_service(acc, 'TestService',
'TestChar', 'TestOptChar')
assert len(serv.characteristics) == 1
assert len(serv.opt_characteristics) == 1
assert serv.characteristics[0].display_name == 'TestChar'
assert serv.opt_characteristics[0].display_name == 'TestOptChar'
def test_override_properties():
"""Test override of characteristic properties with MockChar."""
char = MockChar('TestChar')
new_prop = {1: 'Test', 2: 'Demo'}
override_properties(char, new_prop)
assert char.properties == new_prop
def test_set_accessory_info():
"""Test setting of basic accessory information with MockAccessory."""
acc = MockAccessory('Accessory')
set_accessory_info(acc, 'model', 'manufacturer', '0000')
assert len(acc.services) == 1
serv = acc.services[0]
assert serv.display_name == SERV_ACCESSORY_INFO
assert len(serv.characteristics) == 3
chars = serv.characteristics
assert chars[0].display_name == CHAR_MODEL
assert chars[0].value == 'model'
assert chars[1].display_name == CHAR_MANUFACTURER
assert chars[1].value == 'manufacturer'
assert chars[2].display_name == CHAR_SERIAL_NUMBER
assert chars[2].value == '0000'
@patch(PATH_ACC, side_effect=mock_preload_service)
def test_home_accessory(mock_pre_serv):
"""Test initializing a HomeAccessory object."""
acc = HomeAccessory('TestAccessory', 'test.accessory', 'WINDOW')
assert acc.display_name == 'TestAccessory'
assert acc.category == 13 # Category.WINDOW
assert len(acc.services) == 1
serv = acc.services[0]
assert serv.display_name == SERV_ACCESSORY_INFO
char_model = serv.get_characteristic(CHAR_MODEL)
assert char_model.get_value() == 'test.accessory'
@patch(PATH_ACC, side_effect=mock_preload_service)
def test_home_bridge(mock_pre_serv):
"""Test initializing a HomeBridge object."""
bridge = HomeBridge('TestBridge', 'test.bridge', b'123-45-678')
assert bridge.display_name == 'TestBridge'
assert bridge.pincode == b'123-45-678'
assert len(bridge.services) == 2
assert bridge.services[0].display_name == SERV_ACCESSORY_INFO
assert bridge.services[1].display_name == SERV_BRIDGING_STATE
char_model = bridge.services[0].get_characteristic(CHAR_MODEL)
assert char_model.get_value() == 'test.bridge'
def test_mock_accessory():
"""Test attributes and functions of a MockAccessory."""
acc = MockAccessory('TestAcc')
serv = MockService('TestServ')
acc.add_service(serv)
assert acc.display_name == 'TestAcc'
assert len(acc.services) == 1
assert acc.get_service('TestServ') == serv
assert acc.get_service('NewServ').display_name == 'NewServ'
assert len(acc.services) == 2
def test_mock_service():
"""Test attributes and functions of a MockService."""
serv = MockService('TestServ')
char = MockChar('TestChar')
opt_char = MockChar('TestOptChar')
serv.add_characteristic(char)
serv.add_opt_characteristic(opt_char)
assert serv.display_name == 'TestServ'
assert len(serv.characteristics) == 1
assert len(serv.opt_characteristics) == 1
assert serv.get_characteristic('TestChar') == char
assert serv.get_characteristic('TestOptChar') == opt_char
assert serv.get_characteristic('NewChar').display_name == 'NewChar'
assert len(serv.characteristics) == 2
def test_mock_char():
"""Test attributes and functions of a MockChar."""
def callback_method(value):
"""Provide a callback options for 'set_value' method."""
assert value == 'With callback'
char = MockChar('TestChar')
char.set_value('Value')
assert char.display_name == 'TestChar'
assert char.get_value() == 'Value'
char.setter_callback = callback_method
char.set_value('With callback')
``` |
{
"source": "joshlvmh/iqtree_arm_neon",
"score": 3
} |
#### File: IQ-TREE/test_scripts/jobmanager.py
```python
import sys, os, time, multiprocessing, optparse
import subprocess, logging, datetime
def cpu_count():
''' Returns the number of CPUs in the system
'''
num = 1
if sys.platform == 'win32':
try:
num = int(os.environ['NUMBER_OF_PROCESSORS'])
except (ValueError, KeyError):
pass
elif sys.platform == 'darwin':
try:
num = int(os.popen('sysctl -n hw.ncpu').read())
except ValueError:
pass
else:
try:
num = os.sysconf('SC_NPROCESSORS_ONLN')
except (ValueError, OSError, AttributeError):
pass
return num
def exec_commands(cmds, name, num_cpus):
''' Execute commands in parallel in multiple processes
(using at most the requested number of CPUs)
'''
if not cmds: return # empty list
def done(p):
return p.poll() is not None
def success(p):
return p.returncode == 0
def fail():
sys.exit(1)
# max_task = cpu_count()
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
my_time = datetime.datetime.now()
handler = logging.FileHandler(name + "." + str(my_time.year) + str(my_time.month) + str(my_time.day) +
str(my_time.hour) + str(my_time.minute) + str(my_time.second) + ".log")
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
max_task = multiprocessing.cpu_count()
logger.info("Available CPUs = " + str(max_task) + " / using " + str(num_cpus) + " CPUs")
logger.info("Number of jobs = " + str(len(cmds)))
processes = []
while True:
while cmds and len(processes) < num_cpus:
task = cmds.pop(0)
#print subprocess.list2cmdline(task)
task_id, cmd = task.split(" ", 1)
logger.info("Executing job " + task_id + ": " + cmd.strip())
#print cmd
task_output = open(task_id + ".out", "w")
time_cmd = "time " + cmd
processes.append([subprocess.Popen(time_cmd, stderr=subprocess.STDOUT, stdout=task_output, shell=True), task_id])
for p in processes:
if done(p[0]):
if success(p[0]):
#print "Process with ID = ", p.pid, " has finished"
#print "number of processes before removal: ", len(processes)
logger.info("Job " + p[1] + " has finished")
processes.remove(p)
#print "number of processes after removal: ", len(processes)
else:
logger.info("Job " + p[1] + " finished with ERROR CODE " + str(p[0].returncode))
processes.remove(p)
if not processes and not cmds:
break
else:
time.sleep(5)
if __name__ == '__main__':
max_cores = multiprocessing.cpu_count()
usage = "USAGE: %prog [options]"
parser = optparse.OptionParser(usage=usage)
parser.add_option('-f','--cmd', dest="cmd", help='File containing all commands')
parser.add_option('-c','--cpu', dest="cpu", help='Number of CPU to use', default=max_cores)
(options, args) = parser.parse_args()
if len(sys.argv) == 1:
parser.print_help()
exit(0)
jobs = open(options.cmd, "r").readlines()
exec_commands(jobs, options.cmd, int(options.cpu))
``` |
{
"source": "joshlyman/Josh-LeetCode",
"score": 4
} |
#### File: joshlyman/Josh-LeetCode/005_Longest_Palindromic_Substring.py
```python
class Solution:
def longestPalindrome(self, s: str) -> str:
# current palindrome
palindrome = ""
for i in range(len(s)):
# "aba"
len1 = len(self.getlongestPalindrome(s,i,i))
if len1>len(palindrome):
palindrome = self.getlongestPalindrome(s,i,i)
# "abba"
len2 = len(self.getlongestPalindrome(s,i,i+1))
if len2>len(palindrome):
palindrome = self.getlongestPalindrome(s,i,i+1)
return palindrome
# for each element, check left and right directions to see if it is a palindrome
def getlongestPalindrome(self,s,left,right):
while left>=0 and right < len(s) and s[left] == s[right]:
left-=1
right+=1
return s[left+1:right]
# Time: O(N^2)
# Space: O(N)
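# Illustrative example (added for clarity; not part of the original solution file):
# for s = "babad", expanding around center i=1 ('a') gives "bab" and around i=2 ('b') gives "aba";
# "bab" is found first and kept, so the method returns "bab".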
```
#### File: joshlyman/Josh-LeetCode/015_3_Sum.py
```python
class Solution:
def threeSum(self, nums: List[int]) -> List[List[int]]:
nums = sorted(nums)
results = []
for i in range(len(nums)):
if i > 0 and nums[i] == nums[i-1]:
continue
self.find_two_sums(nums,i+1, len(nums)-1, -nums[i], results)
return results
def find_two_sums(self,nums,left,right, target, results):
while left < right:
if nums[left] + nums[right] == target:
results.append([-target, nums[left], nums[right]])
right -=1
left +=1
while left < right and nums[left] == nums[left-1]:
left +=1
while left < right and nums[right] == nums[right+1]:
right -=1
elif nums[left] + nums[right] > target:
right -=1
else:
left +=1
# Time: O(n^2): O(n^2) + Sorting: O(nlogn)
# Space: O(n): possible O(logn)
class Solution:
def threeSum(self, nums: List[int]) -> List[List[int]]:
# first sort arrays then use 2 Sum II 2 pointers or 2 Sum hash table to do
res = []
nums.sort()
for i in range(len(nums)):
if nums[i]>0:
break
# the first element must either be at index 0 or differ from the previous element, otherwise it would produce a duplicate result
if i == 0 or nums[i-1]!= nums[i]:
self.twoSumII(nums,i,res)
return res
# repeat 2 Sum or 2 Sum II solution here
def twoSumII(self,nums:List[int],i:int,res:List[List[int]]):
low , high = i+1,len(nums)-1
while low < high:
sum = nums[i] + nums[low] + nums[high]
if sum<0:
low+=1
elif sum >0:
high-=1
else:
res.append([nums[i],nums[low],nums[high]])
# continue
low +=1
high -=1
# avoid duplicate set
while low < high and nums[low] == nums[low-1]:
low+=1
# Time: O(n^2): O(n^2) + Sorting: O(nlogn)
# Space: O(n): possible O(logn)
# V2
class Solution:
def threeSum(self, nums: List[int]) -> List[List[int]]:
n = len(nums)
nums.sort()
ans = []
for i in range(len(nums)):
if i>0 and nums[i-1] == nums[i]:
continue
l,r = i+1,n-1
while l<r:
temp = nums[i] + nums[l] + nums[r]
if temp == 0:
ans.append([nums[i],nums[l],nums[r]])
l+=1
r-=1
while l<r and nums[l] == nums[l-1]:
l+=1
while l<r and nums[r] == nums[r+1]:
r-=1
elif temp>0:
r-=1
else:
l+=1
return ans
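# Illustrative trace (added for clarity; not part of the original solution file):
# nums = [-1, 0, 1, 2, -1, -4] sorts to [-4, -1, -1, 0, 1, 2]
#   i=0 (-4): no pair in the rest sums to 4
#   i=1 (-1): the two pointers find (-1, -1, 2) and (-1, 0, 1)
#   i=2 (-1): skipped because nums[2] == nums[1], avoiding duplicate triplets
# result: [[-1, -1, 2], [-1, 0, 1]]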
```
#### File: joshlyman/Josh-LeetCode/027_Remove_Element.py
```python
class Solution:
def removeElement(self, nums: List[int], val: int) -> int:
if not nums:
return 0
count = 0
for i in range(len(nums)):
if nums[i]!=val:
nums[count] = nums[i]
count+=1
return count
# Time:O(n)
# Space:O(1)
```
#### File: joshlyman/Josh-LeetCode/029_Divide_Two_Integers.py
```python
def divide(self, dividend: int, divisor: int) -> int:
# Constants.
MAX_INT = 2147483647 # 2**31 - 1
MIN_INT = -2147483648 # -2**31
HALF_MIN_INT = -1073741824 # MIN_INT // 2
# Special case: overflow.
if dividend == MIN_INT and divisor == -1:
return MAX_INT
# We need to convert both numbers to negatives.
# Also, we count the number of negatives signs.
negatives = 2
if dividend > 0:
negatives -= 1
dividend = -dividend
if divisor > 0:
negatives -= 1
divisor = -divisor
quotient = 0
# Once the divisor is bigger than the current dividend,
# we can't fit any more copies of the divisor into it anymore */
while divisor >= dividend:
# We know it'll fit at least once as divivend >= divisor.
# Note: We use a negative powerOfTwo as it's possible we might have
# the case divide(INT_MIN, -1). */
powerOfTwo = -1
value = divisor
# Check if double the current value is too big. If not, continue doubling.
# If it is too big, stop doubling and continue with the next step */
while value >= HALF_MIN_INT and value + value >= dividend:
value += value
powerOfTwo += powerOfTwo
# We have been able to subtract divisor another powerOfTwo times.
quotient += powerOfTwo
# Remove value so far so that we can continue the process with remainder.
dividend -= value
# If there was originally one negative sign, then
# the quotient remains negative. Otherwise, switch
# it to positive.
return -quotient if negatives != 1 else quotient
# Time: O(log^2N)
# Space:O(1)
# use bit shifting
# x << y means multiplying x by 2**y
# x >> y means dividing x by 2**y
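# Quick illustration (added for clarity; not part of the original solution file):
#   13 << 2 == 52   # 13 * 2**2
#   13 >> 2 == 3    # 13 // 2**2
# so "b << x" below doubles the divisor x times without using * or /.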
def divide(self, a, b):
sig = (a < 0) == (b < 0)
a, b, res = abs(a), abs(b), 0
while a >= b:
x = 0
while a >= b << (x + 1): x += 1
res += 1 << x
a -= b << x
# -2**31 <= dividend, divisor <= 2**31 - 1
return min(res if sig else -res, 2147483647)
# Time: O(log^2 N)
# Space:O(1)
```
#### File: joshlyman/Josh-LeetCode/040_Combination_Sum_II.py
```python
class Solution:
def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:
candidates.sort()
results = []
def backtrack(remain,comb,start):
if remain == 0:
# make a deep copy of the current combination
results.append(list(comb))
return
# condition not satisfied
elif remain <0:
# exceed the scope, stop exploration.
return
for i in range(start,len(candidates)):
# # Very important here! We don't use `i > 0`
# because we always want to count the first element in this
# recursive step even if it is the same as one before.
# To avoid overcounting, we just ignore the duplicates after the first element.
if i > start and candidates[i] == candidates[i-1]:
continue
# add the number into the combination
comb.append(candidates[i])
# don't give the current number another chance, just move on
backtrack(remain-candidates[i],comb,i+1)
# backtrack, remove the number from the combination
comb.pop()
backtrack(target,[],0)
return results
# Time: O(2^N)
# Let N be the number of candidates, so here should be total # of combinations
# Space:O(N)
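# Illustrative example (added for clarity; not part of the original solution file):
# candidates = [10, 1, 2, 7, 6, 1, 5], target = 8 -> sorted [1, 1, 2, 5, 6, 7, 10]
# backtracking yields [[1, 1, 6], [1, 2, 5], [1, 7], [2, 6]];
# the "i > start and candidates[i] == candidates[i-1]" check stops the second 1
# from starting duplicate [1, 2, 5] / [1, 7] branches at the same tree level.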
# V2
class Solution:
def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:
res = []
# difference with combination sum I
candidates.sort()
self.backtrack(0,candidates,target,[],res)
return res
def backtrack(self,start,candidates,target,path,res):
if target <0:
return
if target ==0:
res.append(path)
return
for i in range(start,len(candidates)):
# difference with combination sum I
if i > start and candidates[i] == candidates[i-1]:
continue
self.backtrack(i+1,candidates,target-candidates[i],path+[candidates[i]],res)
```
#### File: joshlyman/Josh-LeetCode/051_N_Queens.py
```python
class Solution:
def solveNQueens(self, n: int) -> List[List[str]]:
def DFS(queens, xy_dif, xy_sum):
"""
temp = [["." * i + "Q" + "." * (n - i - 1) for i in queens]]
for t in temp:
for tt in t:
print(tt)
print("\n")
print("\n")
"""
p = len(queens) # p is the index of row
if p == n:
# finish all rows and
result.append(queens)
return None
for q in range(n): # q is the index of col
# queens stores those used cols, for example, [0,2,4,1] means these cols have been used
# xy_dif is the diagonal 1
# xy_sum is the diagonal 2
if q not in queens and p - q not in xy_dif and p + q not in xy_sum:
# this col is valid, so DFS moves on to the next row; if in the end fewer than n queens are placed, the path is simply dropped
DFS(queens + [q], xy_dif + [p - q], xy_sum + [p + q])
result = []
# 1: store valid visited queens
# 2: store invalid nodes of diagnal 1: y-x (y =x+b)
# 3: store invalid nodes of diagnal 2: y+x (y =-x+b)
DFS([], [], [])
# Here sol is each row
# i in sol is each col
return [["." * i + "Q" + "." * (n - i - 1) for i in sol] for sol in result]
# Time: O(N!): factorial N
# Space:O(N)
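# Illustrative example (added for clarity; not part of the original solution file):
# for n = 4, one valid DFS path is queens = [1, 3, 0, 2] (one column index per row), rendered as
#   .Q..
#   ...Q
#   Q...
#   ..Q.
# along this path every p - q (xy_dif) and p + q (xy_sum) value is unique,
# so no two queens ever share a diagonal.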
```
#### File: joshlyman/Josh-LeetCode/057_Insert_Interval.py
```python
class Solution:
def insert(self, intervals: 'List[Interval]', newInterval: 'Interval') -> 'List[Interval]':
# init data
new_start, new_end = newInterval
idx, n = 0, len(intervals)
output = []
# add all intervals starting before newInterval
while idx < n and new_start > intervals[idx][0]:
output.append(intervals[idx])
idx += 1
# add newInterval
# if there is no overlap, just add the interval
if not output or output[-1][1] < new_start:
output.append(newInterval)
# if there is an overlap, merge with the last interval
else:
output[-1][1] = max(output[-1][1], new_end)
# add next intervals, merge with newInterval if needed
while idx < n:
interval = intervals[idx]
start, end = interval
idx += 1
# if there is no overlap, just add an interval
if output[-1][1] < start:
output.append(interval)
# if there is an overlap, merge with the last interval
else:
output[-1][1] = max(output[-1][1], end)
return output
# V2
class Solution:
def insert(self, intervals: List[List[int]], newInterval: List[int]) -> List[List[int]]:
if not intervals:
return [newInterval]
result = []
for interval in intervals:
# the current interval ends before the new interval starts, so we can keep the current interval because the new one does not overlap with it
if interval[1] < newInterval[0]:
result.append(interval)
# the new interval's range is before the other, so we can add the new interval and update it to the current one
elif interval[0] > newInterval[1]:
result.append(newInterval)
newInterval = interval
# the new interval is in the range of the other interval, we have an overlap, so we must choose the min for start and max for end of interval
# continue as new interval since it might overlap with others
elif interval[1] >= newInterval[0] or interval[0] <= newInterval[1]:
newInterval[0] = min(interval[0], newInterval[0])
newInterval[1] = max(newInterval[1], interval[1])
result.append(newInterval)
return result
```
#### File: joshlyman/Josh-LeetCode/065_Valid_Number.py
```python
class Solution:
def isNumber(self, s: str) -> bool:
s = s.strip()
met_dot = met_e = met_digit = False
for i, char in enumerate(s):
if char in ['+', '-']:
if i > 0 and s[i-1] != 'e':
return False
elif char == '.':
if met_dot or met_e: return False
met_dot = True
elif char == 'e':
if met_e or not met_digit:
return False
met_e, met_digit = True, False
elif char.isdigit():
met_digit = True
else:
return False
return met_digit
# Time: O(N)
# Space:O(1)
```
#### File: joshlyman/Josh-LeetCode/067_Add_Binary.py
```python
class Solution:
def addBinary(self, a: str, b: str) -> str:
# we could convert str to int, but this does not work well when the string is very long and it takes a long time. O(N+M)
# 1.use bit by bit computation to speed up, O(max(N,M)), space is O(max(N,M))
if a == "0":
return b
if b == "0":
return a
carry = 0
result = ''
a = list(a)
b = list(b)
while a or b or carry:
if a:
carry += int(a.pop())
if b:
carry += int(b.pop())
result += str(carry %2)
carry //= 2
return result[::-1]
# Time: O(max(N,M))
# Space O(max(N,M))
# 2.use bit manipulation if not allowed to use addition
# answers w/o carry is x^y, carry is x&y <<1, shift 1 bit to left
# answers as new x, carry as new y, do loop
class Solution:
def addBinary(self, a: str, b: str) -> str:
# convert them to integer
x, y = int(a, 2), int(b, 2)
while y:
answer = x ^ y
carry = (x & y) << 1
x, y = answer, carry
# return x as binary form
return bin(x)[2:]
# Time: O(N+M)
# Space O(max(N,M))
```
#### File: joshlyman/Josh-LeetCode/076_Minimum_Window_Substring.py
```python
def minWindow(s, t):
need = collections.Counter(t) #hash table to store char frequency
missing = len(t) #total number of chars we care
start, end = 0, 0
i = 0
for j, char in enumerate(s, 1): #index j from 1
if need[char] > 0:
missing -= 1
need[char] -= 1
if missing == 0: #match all chars
while i < j and need[s[i]] < 0: #remove chars to find the real start
need[s[i]] += 1
i += 1
need[s[i]] += 1 #make sure the first appearing char satisfies need[char]>0
missing += 1 #we missed this first char, so add missing by 1
if end == 0 or j-i < end-start: #update window
start, end = i, j
i += 1 #update i to start+1 for next window
return s[start:end]
# Time: O(|S|+|T|)
# Space:O(|S|+|T|)
# Refer from:
# https://leetcode.com/problems/minimum-window-substring/solution/
# Sliding Window
# We start with two pointers, left and right, initially pointing to the first element of the string S.
# We use the right pointer to expand the window until we get a desirable window i.e. a window that contains all of the characters of T.
# Once we have a window with all the characters, we can move the left pointer ahead one by one. If the window is still a desirable one we keep on updating the minimum window size.
# If the window is not desirable any more, we repeat step 2 onwards.
# The current window is s[i:j] and the result window is s[I:J]. In need[c] I store how many times I
# need character c (can be negative) and missing tells how many characters are still missing.
# In the loop, first add the new character to the window. Then, if nothing is missing,
# remove as much as possible from the window start and then update the result.
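# Worked example (added for clarity; not part of the original explanation):
# s = "ADOBECODEBANC", t = "ABC"
#   expand right until the window "ADOBEC" covers A, B and C (missing == 0),
#   record length 6, then keep sliding; later windows shrink down to "BANC" (length 4),
#   which is the minimum window returned.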
class Solution:
def minWindow(self, s: str, t: str) -> str:
m = len(s)
n = len(t)
if m < n:
return ''
lt = {}
# put t into dict (lt) and count how many # for each char
for i in t:
if i not in lt:
lt[i] = 1
else:
lt[i] += 1
# missing is to count how many remaining char needed from substring
# finally get candidate substring which satisfy need of t
missing = n
i = I = J = 0
for j, c in enumerate(s, 1):
if c in lt and lt[c] > 0:
missing -= 1
if c in lt:
# lt can be negative
lt[c] -= 1
# i is index of candidate substring, remove as many as char from candidate
while i < j and not missing:
if not J or j-i < J-I:
I, J = i, j
if s[i] not in lt:
i += 1
continue
else:
# if lt contains s[i], then # of s[i] +1, might reach to 0
lt[s[i]] += 1
# if > 0, means we need more, then missing +1
if lt[s[i]] > 0:
missing += 1
i += 1
return s[I:J]
# Time: O(|S|+|T|)
# Space:O(|S|+|T|)
# Optimized Sliding Window
# A small improvement to the above approach can reduce the time complexity of the algorithm to O(2*|filtered_S| + |S| + |T|),
# where filtered_S is the string formed from S by removing all the elements not present in T.
```
#### File: joshlyman/Josh-LeetCode/091. Decode Ways.py
```python
def numDecodings(self, s: str) -> int:
if not s or s[0]=='0':
return 0
dp = [0 for x in range(len(s) + 1)]
# base case initialization
dp[0:2] = [1,1]
for i in range(2, len(s) + 1):
# One step jump
if 0 < int(s[i-1:i]): #(2)
dp[i] = dp[i - 1]
# Two step jump
if 10 <= int(s[i-2:i]) <= 26: #(3)
dp[i] += dp[i - 2]
return dp[-1]
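# Illustrative dp trace (added for clarity; not part of the original solution file):
# s = "226": dp = [1, 1, _, _]
#   i=2: "2" is valid  -> dp[2] = dp[1] = 1; "22" is in 10..26 -> dp[2] += dp[0] -> 2
#   i=3: "6" is valid  -> dp[3] = dp[2] = 2; "26" is in 10..26 -> dp[3] += dp[1] -> 3
# so "226" has 3 decodings: "2 2 6", "22 6", "2 26".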
```
#### File: joshlyman/Josh-LeetCode/094_Binary_Tree_Inorder_Traversal.py
```python
class Solution:
def inorderTraversal(self, root: TreeNode) -> List[int]:
res = []
self.dfs(root,res)
return res
def dfs(self,root,res):
if root is not None:
if root.left is not None:
self.dfs(root.left,res)
res.append(root.val)
if root.right is not None:
self.dfs(root.right,res)
# Time: O(N)
# The recursive function is T(n) = 2 T(n/2)+1
# Space:O(N)
# The worst case space required is O(n),
# and in the average case it is O(logn) where n is number of nodes.
# Approach 2: Iterative
# use stack
# initialize the stack
class Solution:
def inorderTraversal(self, root: TreeNode) -> List[int]:
stack = []
# initialize the traveral list
traversal = []
# while we're at a valid node or there are
# still nodes to traverse...
while stack or root:
if root:
# if we're at a valid node,
# remember where we've been and keep moving left
stack.append(root)
root = root.left
else:
# otherwise we've hit a dead end so
# -- pop the most recent value
# -- report out
# -- move right
root = stack.pop()
traversal.append(root.val)
root = root.right
return traversal
# Time: O(N)
# Space:O(N)
# V2
# https://www.jiuzhang.com/problem/binary-tree-inorder-traversal/
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def inorderTraversal(self, root: TreeNode) -> List[int]:
if not root:
return []
# create a dummy node whose right pointer points to root
# and push it onto the stack; the dummy on top of the stack
# is the iterator's current position
dummy = TreeNode(0)
dummy.right = root
stack = [dummy]
inorder = []
# each step moves the iterator to the next node,
# i.e. adjusts the stack so that its top is the next node
while stack:
node = stack.pop()
if node.right:
node = node.right
while node:
stack.append(node)
node = node.left
if stack:
inorder.append(stack[-1].val)
return inorder
# Approach 3: Morris Traversal
# https://leetcode.com/problems/binary-tree-inorder-traversal/solution/
# Refer from
# https://leetcode.com/problems/binary-tree-inorder-traversal/discuss/668448/Morris-Traversal
def inorderTraversal(self, root: TreeNode) -> List[int]:
res = []
while root:
if not root.left: # if we don't have a left, this is our best in-order value at the moment. add it to the list and move right.
res.append(root.val)
root = root.right
else:
pred = self.findPredecessor(root) # find the predecessor for the given node. This is the farthest right of the first left we see.
# if the predecessor does not already point back to the current node, we still need to explore this left subtree. The pred.right != root check ensures we are not re-exploring a subtree we have already visited: thread it and go left.
if pred.right != root:
pred.right = root
root = root.left
else:
# otherwise, we have found a pointer back to the current root and we need to rewrite the tree structure. This is basically a form of "have we seen this before?".
root.left = None
return res
def findPredecessor(self, root: TreeNode) -> TreeNode:
curr = root.left
while curr.right and curr.right != root:
curr = curr.right
return curr
# Time complexity : O(n).
# To prove that the time complexity is O(n), the biggest problem lies in finding the time complexity of finding the predecessor nodes of all the nodes
# in the binary tree. Intuitively, the complexity is O(nlogn), because finding the predecessor node for a single node is related to the height of the tree.
# But in fact, finding the predecessor nodes for all nodes only needs O(n) time, because a binary tree with n nodes has n-1 edges and
# the whole process handles each edge at most 2 times: once to locate a node, and once to find the predecessor node. So the complexity is O(n).
# Space complexity : O(n). Arraylist of size n is used.
```
#### File: joshlyman/Josh-LeetCode/104_Maximum_Depth_of_Binary_Tree.py
```python
class Solution:
def maxDepth(self, root: TreeNode) -> int:
if root is None:
return 0
else:
return max(self.maxDepth(root.left),self.maxDepth(root.right))+1
# Time: O(n)
# Space:O(logn)
class Solution:
def maxDepth(self, root: TreeNode) -> int:
if root is None:
return 0
if root.left == None and root.right == None:
return 1
else:
left_height = 1+self.maxDepth(root.left)
right_height = 1+self.maxDepth(root.right)
return max(left_height,right_height)
# Time: O(n)
# Space:O(logn)
```
#### File: joshlyman/Josh-LeetCode/105. Construct Binary Tree from Preorder and Inorder Traversal.py
```python
class Solution:
def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
# preorder: root -> left -> right
# inorder: left -> root -> right
if not inorder:
return
# if inorder:
ind = inorder.index(preorder.pop(0))
root = TreeNode(inorder[ind])
root.left = self.buildTree(preorder, inorder[0:ind])
root.right = self.buildTree(preorder, inorder[ind+1:])
return root
# V2
class Solution:
def buildTree(self, preorder, inorder):
"""
:type preorder: List[int]
:type inorder: List[int]
:rtype: TreeNode
"""
def helper(in_left = 0, in_right = len(inorder)):
nonlocal pre_idx
# if there is no elements to construct subtrees
if in_left == in_right:
return None
# pick up pre_idx element as a root
root_val = preorder[pre_idx]
root = TreeNode(root_val)
# root splits inorder list
# into left and right subtrees
index = idx_map[root_val]
# recursion
pre_idx += 1
# build left subtree
root.left = helper(in_left, index)
# build right subtree
root.right = helper(index + 1, in_right)
return root
# start from first preorder element
pre_idx = 0
# build a hashmap value -> its index
idx_map = {val:idx for idx, val in enumerate(inorder)}
return helper()
# V3
class Solution(object):
def buildTree(self, preorder, inorder):
inorder_map = {val: i for i, val in enumerate(inorder)}
return self.dfs_helper(inorder_map, preorder, 0, len(inorder) - 1)
def dfs_helper(self, inorder_map, preorder, left, right):
if not preorder :
return
node = preorder.pop(0)
root = TreeNode(node)
root_index = inorder_map[node]
if root_index != left:
root.left = self.dfs_helper(inorder_map, preorder, left, root_index - 1)
if root_index != right:
root.right = self.dfs_helper(inorder_map, preorder, root_index + 1, right)
return root
# Time: O(N)
# Space;O(N)
```
#### File: joshlyman/Josh-LeetCode/1094. Car Pooling.py
```python
class Solution:
def carPooling(self, trips: List[List[int]], capacity: int) -> bool:
lst = []
for n, start, end in trips:
lst.append((start, n))
lst.append((end, -n))
lst.sort()
pas = 0
for loc in lst:
pas += loc[1]
if pas > capacity:
return False
return True
# Time: O(NlogN)
# Space:O(N)
# Bucket Sort
# trips.length <= 1000
class Solution:
def carPooling(self, trips: List[List[int]], capacity: int) -> bool:
timestamp = [0] * 1001
for trip in trips:
timestamp[trip[1]] += trip[0]
timestamp[trip[2]] -= trip[0]
used_capacity = 0
for passenger_change in timestamp:
used_capacity += passenger_change
if used_capacity > capacity:
return False
return True
# Time: O(max(N,1001))
# Space:O(1001) = O(1)
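# Illustrative example (added for clarity; not part of the original solution file):
# trips = [[2, 1, 5], [3, 3, 7]], capacity = 4
#   timestamp[1] += 2, timestamp[5] -= 2, timestamp[3] += 3, timestamp[7] -= 3
#   the running sum reaches 2 at km 1 and 5 at km 3, which exceeds 4 -> False
# with capacity = 5 the same trips fit -> True.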
```
#### File: joshlyman/Josh-LeetCode/110_Balanced_Binary_Tree.py
```python
class Solution:
def isBalanced(self, root: TreeNode) -> bool:
def getheight(root):
if root is None:
return 0
left_height, right_height = getheight(root.left),getheight(root.right)
if left_height < 0 or right_height <0 or abs(left_height-right_height)>1:
return -1
return max(left_height,right_height)+1
h = getheight(root)
return (h>=0)
# Time: O(n), For every subtree, we compute its height in constant time as well as compare the height of its children.
# Space:O(n), The recursion stack may go up to O(n) if the tree is unbalanced.
```
#### File: joshlyman/Josh-LeetCode/1143_Longest_Common_Subsequence.py
```python
class Solution:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
return self.helper(text1, text2, 0, 0)
def helper(self, text1, text2, i, j):
if i == len(text1) or j == len(text2):
return 0
if text1[i] == text2[j]:
return 1 + self.helper(text1, text2, i + 1, j + 1)
else:
return max(self.helper(text1, text2, i+1, j), self.helper(text1, text2, i, j + 1))
lcs("AXYT", "AYZX")
/ \
lcs("AXY", "AYZX") lcs("AXYT", "AYZ")
/ \ / \
lcs("AX", "AYZX") lcs("AXY", "AYZ") lcs("AXY", "AYZ") lcs("AXYT", "AY")
# Time: O(2^(M+N)) in the worst case: plain recursion without memoization branches twice per call
# Space:O(M+N) for the recursion stack
# 2. Recursion with Memoization
class Solution:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
m = len(text1)
n = len(text2)
memo = [[-1 for _ in range(n + 1)] for _ in range(m + 1)]
return self.helper(text1, text2, 0, 0, memo)
def helper(self, text1, text2, i, j, memo):
if memo[i][j] < 0:
if i == len(text1) or j == len(text2):
memo[i][j] = 0
elif text1[i] == text2[j]:
memo[i][j] = 1 + self.helper(text1, text2, i + 1, j + 1, memo)
else:
memo[i][j] = max(
self.helper(text1, text2, i + 1, j, memo),
self.helper(text1, text2, i, j + 1, memo),
)
return memo[i][j]
# Time: O(MN)
# Space:O(MN)
# 3. Bottom Up DP
class Solution:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
m = len(text1)
n = len(text2)
memo = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
for row in range(1, m + 1):
for col in range(1, n + 1):
if text1[row - 1] == text2[col - 1]:
memo[row][col] = 1 + memo[row - 1][col - 1]
else:
memo[row][col] = max(memo[row][col - 1], memo[row - 1][col])
return memo[m][n]
# Time: O(MN)
# Space:O(MN)
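# Illustrative dp table (added for clarity; not part of the original solution file):
# text1 = "abcde", text2 = "ace"
#        ""  a  c  e
#    ""   0  0  0  0
#    a    0  1  1  1
#    b    0  1  1  1
#    c    0  1  2  2
#    d    0  1  2  2
#    e    0  1  2  3   -> the LCS "ace" has length 3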
# 4. DP with Reduced space complexity
class Solution:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
m = len(text1)
n = len(text2)
if m < n:
return self.longestCommonSubsequence(text2, text1)
# instead of a full (m+1) x (n+1) DP table, keep only two rows of length n+1 and alternate between them
memo = [[0 for _ in range(n + 1)] for _ in range(2)]
for i in range(m):
for j in range(n):
if text1[i] == text2[j]:
memo[1 - i % 2][j + 1] = 1 + memo[i % 2][j]
else:
memo[1 - i % 2][j + 1] = max(memo[1 - i % 2][j], memo[i % 2][j + 1])
return memo[m % 2][n]
# Time: O(MN)
# Space:O(Min(M,N))
```
#### File: joshlyman/Josh-LeetCode/114_Flatten_Binary_Tree_to_Linked_List.py
```python
class Solution:
def flatten(self, root: TreeNode) -> None:
"""
Do not return anything, modify root in-place instead.
"""
# Handle the null scenario
if not root:
return None
node = root
while node:
# If the node has a left child
if node.left:
# Find the rightmost node
rightmost = node.left
while rightmost.right:
rightmost = rightmost.right
# rewire the connections
rightmost.right = node.right
node.right = node.left
node.left = None
# move on to the right side of the tree
node = node.right
# Time: O(N)
# Space:O(1)
# 3.Use reversed preorder traversal
# find rightmost node first, then back to left from bottom to top
# preorder is root -> left -> right, here is right -> left -> root
def __init__(self):
self.prev = None
def flatten(self, root):
if not root:
return None
self.flatten(root.right)
self.flatten(root.left)
root.right = self.prev
root.left = None
self.prev = root
# Time: O(N)
# Space:O(1)
```
#### File: joshlyman/Josh-LeetCode/1209. Remove All Adjacent Duplicates in String II.py
```python
class Solution:
def removeDuplicates(self, s: str, k: int) -> str:
stack = [['#', 0]]
for c in s:
if stack[-1][0] == c:
stack[-1][1] += 1
if stack[-1][1] == k:
stack.pop()
else:
stack.append([c, 1])
return ''.join(c * k for c, k in stack)
# Time: O(N)
# Space:O(N)
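# Illustrative trace (added for clarity; not part of the original solution file):
# s = "deeedbbcccbdaa", k = 3
#   "eee" reaches count 3 and pops, the next "d" joins the earlier "d" (count 2),
#   "ccc" pops, the surrounding "b"s then reach 3 and pop, then the "d"s reach 3 and pop,
#   leaving "aa" as the answer.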
```
#### File: joshlyman/Josh-LeetCode/1233_Remove_Sub-Folders_from_the_Filesystem.py
```python
class Solution:
def removeSubfolders(self, folder: List[str]) -> List[str]:
"""
Sort folders, so that parent will always occur in front of child
For each folder, check if it starts with parent folder
If it does, it's a subfolder, skip it. If not, make it next parent folder.
"""
folders = folder
folders.sort()
output = []
parent = ' '
for folder in folders:
if not folder.startswith(parent):
output.append(folder)
parent = folder + '/'
return output
# Time: O(NlogN)
# Space:O(1), not count the output, if count, then O(N)
# trie
class Solution:
def removeSubfolders(self, folder: List[str]) -> List[str]:
Node = lambda: defaultdict(Node)
trie = Node()
ans = []
for path in sorted(folder):
n = trie
for c in path[1:].split('/'):
n = n[c]
if '$' in n:
break
else:
n['$'] = True
ans.append(path)
return ans
# Time: O(NM)
# Space:O(1)
```
#### File: joshlyman/Josh-LeetCode/124_Binary_Tree_Maximum_Path_Sum.py
```python
class Solution:
def maxPathSum(self, root: TreeNode) -> int:
# one needs to modify the above function and to check at each step what is better : to continue the current path or to start a new path with the current node as a highest node in this new path.
self.max_path = float("-inf") # placeholder to be updated
def get_max_gain(node):
# nonlocal max_path # This tells that max_path is not a local variable
if node is None:
return 0
gain_on_left = max(get_max_gain(node.left), 0) # Read the part important observations
gain_on_right = max(get_max_gain(node.right), 0) # Read the part important observations
current_max_path = node.val + gain_on_left + gain_on_right # Read first three images of going down the recursion stack
self.max_path = max(self.max_path, current_max_path) # Read first three images of going down the recursion stack
return node.val + max(gain_on_left, gain_on_right) # Read the last image of going down the recursion stack
get_max_gain(root) # Starts the recursion chain
return self.max_path
# Time: O(N)
# Space:O(H)
```
#### File: joshlyman/Josh-LeetCode/1265_Print_Immutable_Linked_List_in_Reverse.py
```python
class Solution:
def printLinkedListInReverse(self, head: 'ImmutableListNode') -> None:
# create two nodes
# tail is the end pointer
# cur is to scan from head to tail
tail = None
cur = head
# walk from the back toward the front
while tail!= head:
while cur.getNext()!= tail:
cur = cur.getNext()
# reach to the None, which is the end
# give tail the cur
# then cur from head to scan it again
cur.printValue()
tail = cur
cur = head
# Time: O(N)
# Space:O(1)
# Stack-based (iterative) version
from collections import deque
class Solution:
def printLinkedListInReverse(self, head: 'ImmutableListNode') -> None:
# or we can use list []
stack = deque([])
stack.append(head)
while head is not None:
head = head.getNext()
if head is not None:
stack.append(head)
while stack:
stack.pop().printValue()
# Time: O(N)
# Space:O(N)
```
#### File: joshlyman/Josh-LeetCode/129_Sum_Root_to_Leaf_Numbers.py
```python
class Solution:
def sumNumbers(self, root: TreeNode) -> int:
root_to_leaf = 0
stack = [(root,0)]
while stack:
root,curr_number = stack.pop()
if root is not None:
curr_number = curr_number*10+root.val
# if it's a leaf, update root-to-leaf sum
if root.left is None and root.right is None:
root_to_leaf += curr_number
else:
# put right first, then put left
stack.append((root.right,curr_number))
stack.append((root.left,curr_number))
return root_to_leaf
# Time: O(N)
# Space:O(H)
# recursive
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def sumNumbers(self, root: TreeNode) -> int:
self.root_to_leaf = 0
def preorder(node,curr_number):
if node is not None:
curr_number = curr_number * 10 + node.val
# reach to the leaf node
if node.left is None and node.right is None:
self.root_to_leaf += curr_number
preorder(node.left,curr_number)
preorder(node.right,curr_number)
preorder(root,0)
return self.root_to_leaf
# Time: O(N)
# Space:O(H)
```
#### File: joshlyman/Josh-LeetCode/1314. Matrix Block Sum.py
```python
class Solution:
def matrixBlockSum(self, mat: List[List[int]], K: int) -> List[List[int]]:
m, n = len(mat), len(mat[0])
rangeSum = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(m):
for j in range(n):
rangeSum[i + 1][j + 1] = rangeSum[i + 1][j] + rangeSum[i][j + 1] - rangeSum[i][j] + mat[i][j]
# to get mat[i][j], we will do the reverse calculation from above equation
ans = [[0] * n for _ in range(m)]
for i in range(m):
for j in range(n):
# This is very important
r1, c1, r2, c2 = max(0, i - K), max(0, j - K), min(m, i + K + 1), min(n, j + K + 1)
ans[i][j] = rangeSum[r2][c2] - rangeSum[r1][c2] - rangeSum[r2][c1] + rangeSum[r1][c1]
return ans
# Time: O(NM)
# Space:O(NM)
# More related practice:
# 304. Range Sum Query 2D - Immutable
# 307. Range Sum Query - Mutable
# 308. Range Sum Query 2D - Mutable: Premium
```
#### File: joshlyman/Josh-LeetCode/1328. Break a Palindrome.py
```python
class Solution:
def breakPalindrome(self, palindrome: str) -> str:
if len(palindrome) == 1:
return ""
# check half of strings to replace the non-a to a
for i in range(len(palindrome) // 2):
if palindrome[i] != 'a':
return palindrome[:i] + 'a' + palindrome[i + 1:]
# it means all chars are "a", so have to replace the last a as b
if palindrome[:-1]:
return palindrome[:-1] + 'b'
else:
return ""
# Time: O(N)
# Space:O(N)
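# Illustrative examples (added for clarity; not part of the original solution file):
#   "abccba" -> "aaccba"  (first non-'a' in the left half is replaced with 'a')
#   "aba"    -> "abb"     (left half is all 'a', so the last char becomes 'b')
#   "a"      -> ""        (a single-char palindrome cannot be broken)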
```
#### File: joshlyman/Josh-LeetCode/1329_Sort_the_Matrix_Diagonally.py
```python
class Solution:
def diagonalSort(self, mat: List[List[int]]) -> List[List[int]]:
# (1,4), i - j = -3
# (1,3), (2,4), i -j = -2
# (1,2), (2,3), (3,4), i-j = -1
n, m = len(mat), len(mat[0])
# create a dict and each element is a list
d = collections.defaultdict(list)
for i in range(n):
for j in range(m):
# store element in each diagonal level
d[i - j].append(mat[i][j])
# sort in reversed order: [3,2,1]
for k in d:
d[k].sort(reverse=1)
# pop each item: 1,2,3
for i in range(n):
for j in range(m):
mat[i][j] = d[i - j].pop()
return mat
# or we can use sort and pop(0)
# for k in d.keys():
# d[k].sort()
# for i in range(rows):
# for j in range(cols):
# mat[i][j] = d[i-j].pop(0)
# Time O(MNlogD), where D is the length of diagonal with D = min(M,N).
# Space O(MN)
```
#### File: joshlyman/Josh-LeetCode/137_Single_Number_II.py
```python
class Solution:
def singleNumber(self, nums: List[int]) -> int:
# use hashset or hashmap will give O(N) space
# Hashset
# 3×(a+b+c)−(a+a+a+b+b+b+c)=2c
# return (3 * sum(set(nums)) - sum(nums)) // 2
# Time: O(N), Space: O(N)
# Hashmap
# from collections import Counter
# class Solution:
# def singleNumber(self, nums):
# hashmap = Counter(nums)
# for k in hashmap.keys():
# if hashmap[k] == 1:
# return k
# Time: O(N), Space: O(N)
# use bit manipulation will give O(1) space
# XOR is to be used to detect the bit which appears odd number of times: 1, 3, 5, etc.
# 2^2 = 0, 2^2^2 = 2
seen_once = seen_twice = 0
for num in nums:
# first appearance:
# add num to seen_once
# don't add to seen_twice because of presence in seen_once
# second appearance:
# remove num from seen_once
# add num to seen_twice
# third appearance:
# don't add to seen_once because of presence in seen_twice
# remove num from seen_twice
seen_once = ~seen_twice & (seen_once ^ num)
seen_twice = ~seen_once & (seen_twice ^ num)
return seen_once
# Time: O(N)
# Space:O(1)
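# Illustrative trace (added for clarity; not part of the original solution file):
# nums = [2, 2, 3, 2] -- the pair (seen_twice, seen_once) tracks each bit's count modulo 3
#   after 1st 2: seen_once = 2, seen_twice = 0
#   after 2nd 2: seen_once = 0, seen_twice = 2
#   after 3:     seen_once = 1, seen_twice = 0
#   after 3rd 2: seen_once = 3, seen_twice = 0  -> the answer is 3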
```
#### File: joshlyman/Josh-LeetCode/139_Word_Break.py
```python
class Solution:
def wordBreak(self, s: str, wordDict: List[str]) -> bool:
n = len(s)
f = [False for i in range(n+1)]
f[0] = True
for i in range(n):
if f[i]:
for j in wordDict:
l = len(j)
if s[i:i+l] == j:
f[i+l] = True
return f[n]
# Time: O(N^2)
# Space:O(N)
# BFS and DFS
# Starts with string s. For each string visited, chop off front of string if it starts with a word in the dictionary and adds the shortened string to the queue or stack. If string becomes empty, that means word break succeeded. Keep a set of seen string states to avoid duplicate work.
class Solution:
def wordBreak(self, s: str, wordDict: List[str]) -> bool:
from collections import deque
q = deque([s])
seen = set()
while q:
s = q.popleft() # popleft() = BFS ; pop() = DFS
for word in wordDict:
if s.startswith(word):
new_s = s[len(word):]
if new_s == "":
return True
if new_s not in seen:
q.append(new_s)
seen.add(new_s)
return False
# Time: O(N^2)
# Space:O(N)
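# Illustrative example (added for clarity; not part of the original solution file):
# s = "leetcode", wordDict = ["leet", "code"]
#   queue: "leetcode" -> "code" (after chopping "leet") -> "" (after chopping "code")
#   an empty remainder means the word break succeeds, so the answer is True.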
```
#### File: joshlyman/Josh-LeetCode/1424_Diagonal_Traverse_II.py
```python
def findDiagonalOrder(self, A):
res = []
for i, r in enumerate(A):
for j, a in enumerate(r):
if len(res) <= i + j:
res.append([])
res[i + j].append(a)
return [a for r in res for a in reversed(r)]
# Time: O(N)
# Space:O(N)
# mine
class Solution:
def findDiagonalOrder(self, nums: List[List[int]]) -> List[int]:
res = []
for i,r in enumerate(nums):
for j,val in enumerate(r):
# for each level, create a list to store
if len(res) <= i+ j:
res.append([])
res[i+j].append(val)
output = []
for level in res:
for v in reversed(level):
output.append(v)
return output
```
#### File: joshlyman/Josh-LeetCode/143_Reorder_List.py
```python
class Solution:
def reorderList(self, head: ListNode) -> None:
"""
Do not return anything, modify head in-place instead.
"""
# 1.find a middle node using fast and slow two pointers
# 2.reverse second part
# 3.merge first and second parts
if head is None:
return None
slow = fast = head
while fast and fast.next:
# fast approches 2 steps, so slow is middle when fast goes to the end
slow = slow.next
fast = fast.next.next
# convert 1->2->3->4->5->6 into 1->2->3->4 and 6->5->4
# reverse the second half in-place
prev,curr = None,slow
while curr:
curr.next,prev,curr = prev,curr,curr.next
# merge two sorted linked lists [Problem 21]
# merge 1->2->3->4 and 6->5->4 into 1->6->2->5->3->4
first,second = head,prev
while second.next:
tmp = first.next
first.next = second
first = tmp
tmp = second.next
second.next = first
second = tmp
# another simple way:
# first, second = head, prev
# while second.next:
# first.next, first = second, first.next
# second.next, second = first, second.next
# Time: O(N)
# Space:O(1)
```
#### File: joshlyman/Josh-LeetCode/144_Binary_Tree_Preorder_Traversal.py
```python
class Solution:
def preorderTraversal(self, root: TreeNode) -> List[int]:
if root is None:
return []
# Let's start from the root and then at each iteration pop the current node out of the stack and push its child nodes. In the implemented strategy we push nodes into output list following the order Top->Bottom and Left->Right, that naturally reproduces preorder traversal.
stack = [root]
output = []
while stack:
root = stack.pop()
if root is not None:
output.append(root.val)
# The stack is last-in first-out, so we push the right child first and the left child last; the left child is then processed first.
if root.right is not None:
stack.append(root.right)
if root.left is not None:
stack.append(root.left)
return output
# Time: O(N)
# we visit each node exactly once, thus the time complexity is O(N), where N is the number of nodes, i.e. the size of tree.
# Space:O(N)
# depending on the tree structure, we could keep up to the entire tree, therefore, the space complexity is O(N).
# best is O(logN)
# Approach 3: recursion
def preorderTraversal(self, root):
res = []
self.dfs(root, res)
return res
def dfs(self, root, res):
if root:
res.append(root.val)
self.dfs(root.left, res)
self.dfs(root.right, res)
# Approach 2: Morris traversal
# no need to use stack
class Solution(object):
def preorderTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
node, output = root, []
while node:
if not node.left:
output.append(node.val)
node = node.right
else:
current = node.left
while current.right and current.right is not node:
current = current.right
if not current.right:
output.append(node.val)
current.right = node
node = node.left
else:
current.right = None
node = node.right
return output
# Time : O(N)
# we visit each predecessor exactly twice descending down from the node,
# thus the time complexity is O(N), where N is the number of nodes, i.e. the size of tree.
# Space: O(N),
# we use no additional memory for the computation itself, but output list contains N elements,
# and thus space complexity is O(N).
```
#### File: joshlyman/Josh-LeetCode/148_Sort_List.py
```python
class Solution:
def sortList(self, head: ListNode) -> ListNode:
if not head or not head.next:
return head
fast, slow = head.next, head
while fast and fast.next:
fast = fast.next.next
slow = slow.next
start = slow.next
slow.next = None
l, r = self.sortList(head), self.sortList(start)
return self.merge(l, r)
def merge(self, l, r):
if not l or not r:
return l or r
dummy = p = ListNode(0)
while l and r:
if l.val < r.val:
p.next = l
l = l.next
else:
p.next = r
r = r.next
p = p.next
p.next = l or r
return dummy.next
# Time: O(NlogN), where nn is the number of nodes in linked list. The algorithm can be split into 2 phases, Split and Merge.
# Space:O(logN)
# where nn is the number of nodes in linked list. Since the problem is recursive, we need additional space
# to store the recursive call stack. The maximum depth of the recursion tree is nlogn
# quick sort
# Quicksort is also one of the efficient algorithms with the average time complexity of
# O(nlogn). But the worst-case time complexity is O(n^2). Also, variations of the quick sort
# like randomized quicksort are not efficient for the linked list because unlike arrays,
# random access in the linked list is not possible in O(1) time. If we sort the linked list
# using quicksort, we would end up using the head as a pivot element which may not be efficient in all scenarios.
class Solution(object):
def sortList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
def partition(start, end):
node = start.next.next
pivotPrev = start.next
pivotPrev.next = end
pivotPost = pivotPrev
while node != end:
temp = node.next
if node.val > pivotPrev.val:
node.next = pivotPost.next
pivotPost.next = node
elif node.val < pivotPrev.val:
node.next = start.next
start.next = node
else:
node.next = pivotPost.next
pivotPost.next = node
pivotPost = pivotPost.next
node = temp
return [pivotPrev, pivotPost]
def quicksort(start, end):
if start.next != end:
prev, post = partition(start, end)
quicksort(start, prev)
quicksort(post, end)
newHead = ListNode(0)
newHead.next = head
quicksort(newHead, None)
return newHead.next
# Time: best is O(NlogN), worst is O(N^2)
# Space: O(1)
```
#### File: joshlyman/Josh-LeetCode/1528_Shuffle_String.py
```python
class Solution:
def restoreString(self, s: str, indices: List[int]) -> str:
res = [""]*len(s)
for idx,si in enumerate(s):
res[indices[idx]] = si
return "".join(res)
# Time: O(N)
# Space:O(N)
```
#### File: joshlyman/Josh-LeetCode/1539_Kth_Missing_Positive_Number.py
```python
class Solution:
def findKthPositive(self, arr: List[int], k: int) -> int:
lo = 0
hi = len(arr) - 1
while(lo <= hi):
mid = lo + (hi - lo) // 2
missing = arr[mid] - (mid + 1) # ideally, arr[i] should hold i + 1 value i.e arr[0] = 1, arr[1] = 2..etc
if missing >= k:
hi = mid - 1
else:
lo = mid + 1
return lo + k
# Time: O(logN)
# Space:O(1)
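# Illustrative example (added for clarity; not part of the original solution file):
# arr = [2, 3, 4, 7, 11], k = 5
#   missing-before counts arr[mid] - (mid + 1) are [1, 1, 1, 3, 6]
#   binary search ends with lo == 4, so the answer is lo + k == 9
#   (the missing positives are 1, 5, 6, 8, 9 and the 5th is 9).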
```
#### File: joshlyman/Josh-LeetCode/163_Missing_Ranges.py
```python
class Solution:
def findMissingRanges(self, nums: List[int], lower: int, upper: int) -> List[str]:
# pad nums with lower-1 and upper+1 so that the boundary ranges are handled by the same gap checks (nums[i+1] - nums[i])
# for example: [1], low = 0, upper = 100.
nums = [lower-1] + nums + [upper+1]
res = []
for i in range(len(nums)-1):
if nums[i+1] - nums[i] == 2:
res.append(str(nums[i]+1))
elif nums[i+1] - nums[i] > 2:
res.append(str(nums[i]+1)+'->'+str(nums[i+1]-1))
return res
# Time: O(N)
# Space:O(N)
```
#### File: joshlyman/Josh-LeetCode/166_Fraction_to_Recurring_Decimal.py
```python
class Solution:
def fractionToDecimal(self, numerator: int, denominator: int) -> str:
n, remainder = divmod(abs(numerator), abs(denominator))
# if both negative, then * can remove the negative sign
# if still negative, means one of them is negative and cannot be removed
sign = '-' if numerator*denominator < 0 else ''
result = [sign+str(n), '.']
stack = []
# store remainder and keep remainder times 10 until the remainder in stack is repeated
# (0,4) = divmod(4,333)
# (0,40) = divmod(40,333)
# (1,67) = divmod(400,333)
# (2,4) = divmod(670,333)
while remainder not in stack:
stack.append(remainder)
n, remainder = divmod(remainder*10, abs(denominator))
result.append(str(n))
# or use dict instead of stack
# remainder here is repeated first one
idx = stack.index(remainder)
# idx+2 because result[0] is the integer part and result[1] is '.'
result.insert(idx+2, '(')
result.append(')')
# remove .(0) after integer if it is only integer
return ''.join(result).replace('(0)', '').rstrip('.')
# Time: O(N)
# Space:O(N)
```
#### File: joshlyman/Josh-LeetCode/191_Number_of_1_Bits.py
```python
def hammingWeight(self, n):
"""
:type n: int
:rtype: int
"""
return bin(n).count('1')
# Using bit operation to cancel a 1 in each round
# Think of a number in binary n = XXXXXX1000, n - 1 is XXXXXX0111. n & (n - 1) will be XXXXXX0000
# which is just remove the last significant 1
def hammingWeight(self, n):
"""
:type n: int
:rtype: int
"""
c = 0
while n:
n &= n - 1
c += 1
return c
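# Illustrative trace of the n & (n - 1) trick (not part of the original file):
# n = 12 (0b1100) -> 12 & 11 = 8 (0b1000) -> 8 & 7 = 0, so the loop runs twice and returns 2.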
class Solution:
def hammingWeight(self, n: int) -> int:
ans=0
while n>0:
if n%2==1:
ans+=1
n=n//2
return ans
# Time: O(1)
# Space:O(1)
# use bit manipulation
def hammingWeight(self, n: int) -> int:
out = 0
while n > 0:
# n & 1 means n%2
if n & 1:
out += 1
# n>>=1 means n//2
n >>= 1
return out
```
#### File: joshlyman/Josh-LeetCode/206_Reverse_Linked_List.py
```python
class Solution:
def reverseList(self, head: ListNode) -> ListNode:
prev = None
cur = head
while cur:
# switch current node with prev node
            # 1->2: prev: None, 1: cur, 2: Next
Next = cur.next
# prev: none, 1->NULL
cur.next = prev
# shift to the left:
# cur: 1, prev: none
prev = cur
# the most important to understand here:
# Next:2,becomes to current node
cur = Next
# or: cur.next, prev, cur = prev, cur, cur.next
# 3,1,2 = 1,2,3
return prev
# Time: O(N)
# Space:O(1)
# simpler code:
def reverseList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
cur, prev = head, None
while cur:
# a,b,c = b,c,a, order: 4,5,6 = 1,2,3
# 1->2->3 => 3->2->1
cur.next, prev, cur = prev, cur, cur.next
return prev
# other solution:
# recursion
# def reverseList(self, head):
# return self._reverse(head)
# def _reverse(self, node, prev=None):
# if not node:
# return prev
# n = node.next
# node.next = prev
# return self._reverse(n, node)
# Time: O(N)
# Space:O(N), The extra space comes from implicit stack space due to recursion. The recursion could go up to n levels deep.
```
#### File: joshlyman/Josh-LeetCode/211_Design_Add_and_Search_Words_Data_Structure.py
```python
class WordDictionary:
def __init__(self):
"""
Initialize your data structure here.
"""
self.trie={}
def addWord(self, word: str) -> None:
"""
Adds a word into the data structure.
"""
node = self.trie
for ch in word:
if not ch in node:
node[ch] = {}
node = node[ch]
# mark here means reach to end, if there is $ then word is found
node['$'] = True
# Time: O(M)
# Space:O(M)
def search(self, word: str) -> bool:
"""
Returns if the word is in the data structure. A word could contain the dot character '.' to represent any one letter.
"""
def search_in_node(word,node) -> bool:
for i,ch in enumerate(word):
if not ch in node:
# if the current character is '.', check all possible nodes at this level
if ch == '.':
for x in node:
# if x == $ that means it is already finished, so we dont need to move on
if x!='$' and search_in_node(word[i+1:],node[x]):
return True
# if no node leads to answer or current ch is not '.', then word is not found
return False
# if ch is found, go down to next level in trie
else:
node = node[ch]
# return the word is found, which is True stored as $ in node
return '$' in node
# Time: O(M), worst case is O(MxN) when word is underdefined (i.e. '.ig')
# Space:O(1) for well-defined words, O(M) for underdefined words
return search_in_node(word, self.trie)
# Your WordDictionary object will be instantiated and called as such:
# obj = WordDictionary()
# obj.addWord(word)
# param_2 = obj.search(word)
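# Illustrative example (from the problem statement, not part of this file):
#   obj = WordDictionary()
#   obj.addWord("bad"); obj.addWord("dad"); obj.addWord("mad")
#   obj.search("pad")  # False
#   obj.search("bad")  # True
#   obj.search(".ad")  # True
#   obj.search("b..")  # True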
```
#### File: joshlyman/Josh-LeetCode/219_Contains_Duplicate_II.py
```python
class Solution:
def containsNearbyDuplicate(self, nums: List[int], k: int) -> bool:
hashmap = {}
for ind, val in enumerate(nums):
if val in hashmap and ind - hashmap[val]<=k:
return True
hashmap[val] = ind
return False
# Time: O(N)
# Space:O(min(M,N))
```
#### File: joshlyman/Josh-LeetCode/249_Group_Shifted_Strings.py
```python
class Solution:
def groupStrings(self, strings: List[str]) -> List[List[str]]:
hashmap = {}
for s in strings:
key = ()
for i in range(len(s) - 1):
circular_difference = 26 + ord(s[i+1]) - ord(s[i])
key += (circular_difference % 26,)
if key not in hashmap:
hashmap[key] = []
hashmap[key].append(s)
return list(hashmap.values())
# Time: O(ab) where a is the total number of strings and b is the length of the longest string in strings.
# Space:O(N)
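# Worked example (illustrative): "abc" and "bcd" both get the key (1, 1) because every adjacent
# difference is 1 mod 26, while "az" and "ba" both get the key (25,) since
# (26 + ord('z') - ord('a')) % 26 == (26 + ord('a') - ord('b')) % 26 == 25, so each pair is grouped together.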
```
#### File: joshlyman/Josh-LeetCode/268. Missing Number.py
```python
class Solution:
def missingNumber(self, nums: List[int]) -> int:
numlen = len(nums)
for i in range(numlen+1):
if i not in nums:
return i
# Time: O(N^2) since in list is O(N)
# Space:O(1)
class Solution:
def missingNumber(self, nums: List[int]) -> int:
numlen = len(nums)
numset = set(nums)
for i in range(numlen+1):
if i not in numset:
return i
# Time: O(N) since in set is O(1)
# Space:O(N)
class Solution:
def missingNumber(self, nums: List[int]) -> int:
return sum(range(len(nums)+1)) - sum(nums)
# Time: O(N)
# Space:O(1)
```
#### File: joshlyman/Josh-LeetCode/270_Closest_Binary_Search_Tree_Value.py
```python
class Solution:
def closestValue(self, root: TreeNode, target: float) -> int:
def inorder(r:TreeNode):
if r is None:
return []
else:
rleft = inorder(r.left)
right = inorder(r.right)
return rleft + [r.val] + right
travelist = inorder(root)
# closenodevalue = min(travelist,key=lambda x:abs(x-target))
# return closenodevalue
abstravelist = [abs(x-target) for x in travelist]
idx = abstravelist.index(min(abstravelist))
return travelist[idx]
# Time: O(N)
# Space:O(N)
# 2. no need to traversal all, stop when found
class Solution:
def closestValue(self, root: TreeNode, target: float) -> int:
stack,pred = [], float('-inf')
while stack or root:
# go left as far as you can and add all nodes on the way into stack.
while root:
stack.append(root)
root = root.left
root = stack.pop()
# usually min value must be between previous node and current
if pred <= target and target <root.val:
return min(pred,root.val,key=lambda x: abs(target-x))
pred = root.val
root = root.right
# if finally still not found, then predecessor is the one
return pred
# Time: O(K): average case. k is an index of closest element.worst cases: O(H+K)
# It's known that average case is a balanced tree, in that case stack always contains a few elements,
# and hence one does 2k operations to go to kth element in inorder traversal
# (k times to push into stack and then k times to pop out of stack). That results in O(k) time complexity.
# The worst case is a completely unbalanced tree, then you first push H elements into stack and then pop out k elements, that results in
# O(H+k) time complexity.
# Space:O(H), up to O(H) to keep the stack in the case of non-balanced tree.
# 3. Binary search
class Solution:
def closestValue(self, root: TreeNode, target: float) -> int:
r = root.val
while root:
if abs(root.val - target) < abs(r - target):
r = root.val
# if smaller than root, go left, ow, go right
if target < root.val:
root = root.left
else:
root = root.right
return r
# Time: O(H) since here one goes from root down to a leaf.
# Space:O(1)
```
#### File: joshlyman/Josh-LeetCode/314_Binary_Tree_Vertical_Order_Traversal.py
```python
class Solution:
def verticalOrder(self, root: TreeNode) -> List[List[int]]:
columnTable = collections.defaultdict(list)
queue = collections.deque([(root,0)])
while queue:
node,column = queue.popleft()
if node is None:
continue
columnTable[column].append(node.val)
queue.append([node.left,column-1])
queue.append([node.right,column+1])
return [columnTable[i] for i in sorted(columnTable)]
# Time: O(NlogN), where N is the number of nodes in the tree.
# In the first part of the algorithm, we do the BFS traversal, whose time complexity is O(N) since we traversed each node once and only once.
# In the second part, in order to return the ordered results, we then sort the obtained hash table by its keys, which could result in the O(NlogN) time complexity in the worst case scenario where the binary tree is extremely imbalanced (for instance, each node has only left child node.)
# As a result, the overall time complexity of the algorithm would be O(NlogN).
# Space:O(N)
# First of all, we use a hash table to group the nodes with the same column index. The hash table consists of keys and values. In any case, the values would consume O(N) memory. While the space for the keys could vary, in the worst case, each node has a unique column index, i.e. there would be as many keys as the values. Hence, the total space complexity for the hash table would still be O(N).
# During the BFS traversal, we use a queue data structure to keep track of the next nodes to visit. At any given moment, the queue holds no more than two levels of nodes. For a binary tree, the maximum number of nodes at a level is (N+1)/2, which is also the number of leaves in a full binary tree. As a result, in the worst case, our queue consumes at most (N+1)/2 * 2 = O(N) space.
# Lastly, we also need some space to hold the results, which is basically a reordered hash table of size O(N) as we discussed before.
# BFS wo sorting
class Solution:
def verticalOrder(self, root: TreeNode) -> List[List[int]]:
if root is None:
return []
columnTable = collections.defaultdict(list)
queue = collections.deque([(root,0)])
min_col = 0
max_col = 0
while queue:
node,column = queue.popleft()
if node is None:
continue
columnTable[column].append(node.val)
min_col = min(min_col,column)
max_col = max(max_col,column)
queue.append([node.left,column-1])
queue.append([node.right,column+1])
# Here we replace sorted dict with the min and max so decrease time to O(N)
return [columnTable[i] for i in range(min_col,max_col+1)]
# Time: O(N)
# Space:O(N)
# DFS
# any traversal will be fine
# need to sort row, this is different with BFS
class Solution:
def verticalOrder(self, root: TreeNode) -> List[List[int]]:
# DFS by recursion
if root is None:
return []
columnTable = collections.defaultdict(list)
min_col = max_col = 0
def dfs(node,row,column):
if node is not None:
# this will be inorder DFS
# dfs(node.left, row + 1, column - 1)
nonlocal min_col, max_col
columnTable[column].append((row,node.val))
min_col = min(min_col, column)
max_col = max(max_col, column)
# preorder DFS,
dfs(node.left, row + 1, column - 1)
dfs(node.right, row + 1, column + 1)
dfs(root,0,0)
res = []
for col in range(min_col,max_col+1):
columnTable[col].sort(key=lambda x:x[0])
colVals = [val for row,val in columnTable[col]]
res.append(colVals)
return res
# Time: O(W*HlogH)
# Space:O(N)
```
#### File: joshlyman/Josh-LeetCode/328_Odd_Even_Linked_List.py
```python
class Solution:
def oddEvenList(self, head: ListNode) -> ListNode:
# build two linked list for odd and even
dummy1 = odd = ListNode(0)
dummy2 = even = ListNode(0)
while head:
odd.next = head
even.next = head.next
# move odd and even to next
odd = odd.next
even = even.next
            # check that even is not None because even is always one node ahead of odd
if even:
# start new head
head = head.next.next
else:
# stop the loop
head = None
# link odd and the first node of even (dummy2) together
odd.next = dummy2.next
return dummy1.next
# Time: O(N)
# Space:O(1)
```
#### File: joshlyman/Josh-LeetCode/329. Longest Increasing Path in a Matrix.py
```python
class Solution:
def __init__(self):
self.max_len = 0
self.table = {}
def longestIncreasingPath(self, matrix: List[List[int]]) -> int:
def dfs(x, y, prev):
if x < 0 or x >= len(matrix) or y < 0 or y >= len(matrix[0]) or matrix[x][y] <= prev:
return 0
if (x,y) in self.table:
return self.table[(x,y)]
path = 1 + max(dfs(x+1, y, matrix[x][y]), dfs(x-1, y, matrix[x][y]), dfs(x, y+1, matrix[x][y]), dfs(x, y-1, matrix[x][y]))
self.max_len = max(self.max_len, path)
self.table[(x,y)] = path
return path
for i in range(len(matrix)):
for j in range(len(matrix[0])):
# set up a very small number before (0,0)
dfs(i, j, -10000)
return self.max_len
# Time: O(MN)
# Space:O(MN)
```
#### File: joshlyman/Josh-LeetCode/339_Nested_List_Weight_Sum.py
```python
class Solution:
def depthSum(self, nestedList: List[NestedInteger]) -> int:
def DFS(nestedList, depth):
temp_sum = 0
for member in nestedList:
if member.isInteger():
temp_sum += member.getInteger() * depth
else:
temp_sum += DFS(member.getList(),depth+1)
return temp_sum
return DFS(nestedList,1)
# Time: O(N)
# Space: O(D) for the recursion stack, where D is the maximum nesting depth
# BFS
from collections import deque
class Solution:
def depthSum(self, nestedList: List[NestedInteger]) -> int:
res = 0
queue = deque([(n_int, 1) for n_int in nestedList])
while queue:
n_int, depth = queue.popleft()
if n_int.isInteger():
res += n_int.getInteger() * depth
else:
for i in n_int.getList():
queue.append((i, depth + 1))
return res
```
#### File: joshlyman/Josh-LeetCode/340_Longest_Substring_with_At_Most_K_Distinct_Characters.py
```python
class Solution(object):
"""
The general idea is to iterate over string s.
Always put the character c and its location i in the dictionary d.
1) If the sliding window contains less than or equal to k distinct characters, simply record the return value, and move on.
2) Otherwise, we need to remove a character from the sliding window.
Here's how to find the character to be removed:
Because the values in d represents the rightmost location of each character in the sliding window, in order to find the longest substring T, we need to locate the smallest location, and remove it from the dictionary, and then record the return value.
"""
def lengthOfLongestSubstringKDistinct(self, s, k):
"""
:type s: str
:type k: int
:rtype: int
"""
# Use dictionary d to keep track of (character, location) pair,
# where location is the rightmost location that the character appears at
d = {}
low, ret = 0, 0
for i, c in enumerate(s):
d[c] = i
if len(d) > k:
low = min(d.values())
del d[s[low]]
low += 1
ret = max(i - low + 1, ret)
return ret
    # Time: O(Nk) in the worst case, since min(d.values()) scans up to k+1 entries on each shrink; O(N) when s has at most k distinct characters
# Space:O(K)
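# Illustrative example: lengthOfLongestSubstringKDistinct("eceba", 2) returns 3,
# corresponding to the window "ece" with the two distinct characters 'e' and 'c'.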
# Other solution:
from collections import Counter
class Solution:
def lengthOfLongestSubstringKDistinct(self, s: str, k: int) -> int:
n = len(s)
if k == 0 or n == 0:
return 0
# sliding window left and right pointers
left, right = 0, 0
# hashmap character -> its rightmost position
# in the sliding window
hashmap = defaultdict()
max_len = 1
while right < n:
# add new character and move right pointer
hashmap[s[right]] = right
right += 1
# slidewindow contains 3 characters
if len(hashmap) == k + 1:
# delete the leftmost character
del_idx = min(hashmap.values())
del hashmap[s[del_idx]]
# move left pointer of the slidewindow
left = del_idx + 1
max_len = max(max_len, right - left)
return max_len
# Time: O(N) in the best case of k distinct characters in the string and O(Nk) in
# the worst case of N distinct characters in the string.
# For the best case when input string contains not more than k distinct characters the answer is yes.
# It's the only one pass along the string with N characters and the time complexity is O(N).
# For the worst case when the input string contains n distinct characters, the answer is no.
# In that case at each step one uses O(k) time to find a minimum value in the hashmap with
# k elements and so the overall time complexity is O(Nk).
# Space:O(K) since additional space is used only for a hashmap with at most k + 1 elements.
# To achieve O(N) time
# Approach 2: Sliding Window + Ordered Dictionary
# There is a structure called ordered dictionary, it combines behind both hashmap and linked list.
# In Python this structure is called OrderedDict, which provides four operations in O(1) time:
# Insert the key, Get the key / Check if the key exists / Delete the key / Return the first / or the last added key/value
# The first three operations in O(1) time are provided by the standard hashmap,
# and the forth one - by linked list.
# So Only difference is that If ordered dictionary hashmap contains k + 1 distinct characters,
# remove the leftmost one and move the left pointer so that sliding window
# contains again k distinct characters only.
from collections import OrderedDict
class Solution:
def lengthOfLongestSubstringKDistinct(self, s: 'str', k: 'int') -> 'int':
n = len(s)
if k == 0 or n == 0:
return 0
# sliding window left and right pointers
left, right = 0, 0
# hashmap character -> its rightmost position
# in the sliding window
hashmap = OrderedDict()
max_len = 1
while right < n:
character = s[right]
# if character is already in the hashmap -
# delete it, so that after insert it becomes
# the rightmost element in the hashmap
if character in hashmap:
del hashmap[character]
hashmap[character] = right
right += 1
# slidewindow contains k + 1 characters
if len(hashmap) == k + 1:
# delete the leftmost character
_, del_idx = hashmap.popitem(last = False)
# move left pointer of the slidewindow
left = del_idx + 1
max_len = max(max_len, right - left)
return max_len
# Time: O(N) since all operations with ordered dictionary :
# insert/get/delete/popitem (put/containsKey/remove) are done in a constant time.
# Space: O(k) since additional space is used only for an ordered dictionary
# with at most k + 1 elements.
```
#### File: joshlyman/Josh-LeetCode/349_Intersection_of_Two_Arrays.py
```python
class Solution:
def intersection(self, nums1: List[int], nums2: List[int]) -> List[int]:
return (set(nums2) & set(nums1))
# Time: O(n)
# Space: O(n)
```
#### File: joshlyman/Josh-LeetCode/387_First_Unique_Character_in_a_String.py
```python
class Solution:
def firstUniqChar(self, s: str) -> int:
d = {}
seen = set()
for idx, c in enumerate(s):
if c not in seen:
d[c] = idx
seen.add(c)
elif c in d:
del d[c]
return min(d.values()) if d else -1
# Time: O(N)
# Space: O(N)
class Solution:
def firstUniqChar(self, s: str) -> int:
count = collections.Counter(s)
# find the index
for idx, ch in enumerate(s):
if count[ch] == 1:
return idx
return -1
# Time: O(N)
# Space: O(1)
```
#### File: joshlyman/Josh-LeetCode/392_Is_Subsequence.py
```python
class Solution:
def isSubsequence(self, s: str, t: str) -> bool:
spt = 0
tpt = 0
while spt < len(s) and tpt <len(t):
if s[spt] == t[tpt]:
spt+=1
tpt+=1
return spt == len(s)
# Time: O(|T|), Let∣S∣ be the length of the source string, and ∣T∣ be the length of the target string.
# Space:O(1)
# Other solution:
# https://leetcode.com/problems/is-subsequence/solution/
```
#### File: joshlyman/Josh-LeetCode/438_Find_All_Anagrams_in_a_String.py
```python
class Solution:
def findAnagrams(self, s: str, p: str) -> List[int]:
# use sliding window + hashmap, for each substring, use counter to store and compare with target substring
from collections import Counter
ns = len(s)
np = len(p)
if ns < np:
return []
pcount = Counter(p)
# put first len(p)-1 chars inside counter
scount = Counter(s[:len(p)-1])
output = []
# start from len(p) th char, each time compare substring with target
for i in range(len(p)-1,ns):
scount[s[i]]+=1
if scount == pcount:
output.append(i-len(p)+1)
scount[s[i-len(p)+1]]-=1
if scount[s[i-len(p)+1]] == 0:
del scount[s[i-len(p)+1]]
return output
# Time: O(Ns + Np)
# Space:O(1)
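# Illustrative example: findAnagrams("cbaebabacd", "abc") returns [0, 6];
# "cba" (index 0) and "bac" (index 6) are the anagram windows.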
# Counter might be slow; if you need to build the counter by hand:
class Solution:
def makeCounter(self, s):
d = {}
for char in s:
d[char] = d.get(char, 0) + 1
return d
def findAnagrams(self, s: str, p: str) -> List[int]:
counterP = self.makeCounter(p)
counterI = self.makeCounter(s[:len(p)])
result = []
for i in range(0, len(s)-len(p)+1):
if counterP == counterI:
result.append(i)
counterI[s[i]] -= 1
if counterI[s[i]] == 0:
del counterI[s[i]]
if i + len(p) < len(s):
counterI[s[i+len(p)]] = counterI.get(s[i+len(p)], 0) + 1
return result
```
#### File: joshlyman/Josh-LeetCode/463_Island_Perimeter.py
```python
class Solution:
def islandPerimeter(self, grid: List[List[int]]) -> int:
rows = len(grid)
cols = len(grid[0])
result = 0
for r in range(rows):
for c in range(cols):
if grid[r][c] == 1:
if r == 0:
up = 0
else:
up = grid[r-1][c]
if c == 0:
left = 0
else:
left = grid[r][c-1]
if r == rows-1:
down = 0
else:
down = grid[r+1][c]
if c == cols-1:
right = 0
else:
right = grid[r][c+1]
# check 4 directions and if one of them has island, then minus 1,if water, then it is 0
result += 4-(up+left+right+down)
return result
# Time: O(mn), where m is the number of rows of the grid and n is the number of columns of the grid.
# Since two for loops go through all the cells on the grid, for a two-dimensional grid of size m×n,
# the algorithm would have to check m*n cells.
# Space:O(1)
# Better Counting
# Approach 2 has the same time and space complexity as Approach 1. Even though they have the same time and
# space complexities, Approach 2 is slightly more efficient than the Approach 1. Rather than checking
# 4 surrounding neighbors, we only need to check two neighbors (LEFT and UP) in Approach 2.
# Since we are traversing the grid from left to right, and from top to bottom, for each land
# cell we are currently at, we only need to check whether the LEFT and UP cells are land cells
# with a slight modification on previous approach.
# As you go through each cell on the grid, treat all the land cells as having a perimeter of 4 and add that up to the accumulated result.
# If that land cell has a neighboring land cell, remove 2 sides (one from each land cell) which will be touching between these two cells.
# If your current land cell has a UP land cell, subtract 2 from your accumulated result.
# If your current land cell has a LEFT land cell, subtract 2 from your accumulated result.
# subtract 2 means border do not count
# only consider left and up because right and down always have permeter
class Solution:
def islandPerimeter(self, grid: List[List[int]]) -> int:
rows = len(grid)
cols = len(grid[0])
result = 0
for r in range(rows):
for c in range(cols):
if grid[r][c] == 1:
result+=4
# check if left has water or not,
# if it is island, -2, because left of this island and right of left island (common border) do not count as permeter
if r>0 and grid[r-1][c] == 1:
result -=2
if c>0 and grid[r][c-1] == 1:
result -=2
return result
# Time: O(mn)
# Space:O(1)
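# Illustrative example (from the problem statement): the grid
#   [[0,1,0,0],
#    [1,1,1,0],
#    [0,1,0,0],
#    [1,1,0,0]]
# has a perimeter of 16 under either counting approach.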
```
#### File: joshlyman/Josh-LeetCode/490_The_Maze.py
```python
class Solution:
from collections import deque
def hasPath(self, maze: List[List[int]], start: List[int], destination: List[int]) -> bool:
dirs = [(0, 1), (0, -1), (-1, 0), (1, 0)] # up down left right
visited = [[False] * len(maze[0]) for _ in range(len(maze))]
visited[start[0]][start[1]] = True
q = deque([start])
while q:
tup = q.popleft()
if tup[0] == destination[0] and tup[1] == destination[1]:
return True
for dir in dirs:
# Roll the ball until it hits a wall
row = tup[0] + dir[0]
col = tup[1] + dir[1]
while 0 <= row < len(maze) and 0 <= col < len(maze[0]) and maze[row][col] == 0:
row += dir[0]
col += dir[1]
# x and y locates @ a wall when exiting the above while loop, so we need to backtrack 1 position
(new_x, new_y) = (row - dir[0], col - dir[1])
# Check if the new starting position has been visited
if not visited[new_x][new_y]:
q.append((new_x, new_y))
visited[new_x][new_y] = True
return False
# Time: O(MN)
# Space:O(MN)
# DFS
class Solution:
def hasPath(self, maze, start, destination):
m, n, stopped = len(maze), len(maze[0]), set()
def dfs(x, y):
if (x, y) in stopped:
return False
stopped.add((x, y))
if [x, y] == destination:
return True
for i, j in (-1, 0) , (1, 0), (0, -1), (0, 1):
newX, newY = x, y
while 0 <= newX + i < m and 0 <= newY + j < n and maze[newX + i][newY + j] != 1:
newX += i
newY += j
if dfs(newX, newY):
return True
return False
return dfs(*start)
# Time: O(MN)
# Space:O(MN)
```
#### File: joshlyman/Josh-LeetCode/503. Next Greater Element II.py
```python
# Push the index on the stack. If the current number b is bigger than the last number a in the stack(found by index),
# then we find the next great element for a. Process it twice as it is a circular array to make sure that we
# can reread the next greater element after every element.
class Solution:
def nextGreaterElements(self, nums: List[int]) -> List[int]:
l = len(nums)
# search two rounds in a circle
nums = nums * 2
stack = []
# use -1 to initilize because if no found, return -1
res = [-1] * len(nums)
for idx, num in enumerate(nums):
while stack and num > nums[stack[-1]]:
res[stack.pop()] = num
stack.append(idx)
# only get first length of nums elements
return res[:l]
# Time: O(N)
# Space:O(N)
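# Illustrative example: nextGreaterElements([1, 2, 1]) returns [2, -1, 2];
# the trailing 1 wraps around the circular array and finds 2 as its next greater element.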
```
#### File: joshlyman/Josh-LeetCode/540. Single Element in a Sorted Array.py
```python
class Solution:
def singleNonDuplicate(self, nums: List[int]) -> int:
lo, hi = 0, len(nums) - 1
while lo < hi:
# or we can use %2 to see if mid is even or not
mid = 2 * ((lo + hi) // 4)
if nums[mid] == nums[mid+1]:
lo = mid+2
else:
hi = mid
return nums[lo]
# Time: O(logN)
# Space:O(1)
```
#### File: joshlyman/Josh-LeetCode/572_Subtree_of_Another_Tree.py
```python
class Solution:
def isSubtree(self, s: TreeNode, t: TreeNode) -> bool:
# if either one is empty, then false
if not s or not t:
return False
# check root node
if self.isSameTree(s, t):
return True
# check left and right node
if self.isSubtree(s.left, t) or self.isSubtree(s.right, t):
            return True
        # if neither child of s contains t, s does not contain t
        return False
    # helper: check whether two trees are identical
def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:
# both are none
if not p and not q:
return True
# either is None
if not p or not q:
return False
# both are not none, then compare each root and subtree
if p and q:
return p.val == q.val and self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)
# Time: O(MN), A total of n nodes of the tree s and m nodes of tree t are traversed.
# Space:O(N), The depth of the recursion tree can go upto n. n refers to the number of nodes in s.
```
#### File: joshlyman/Josh-LeetCode/636_Exclusive_Time_of_Functions.py
```python
class Solution:
def exclusiveTime(self, n: int, logs: List[str]) -> List[int]:
ans = [0] * n
stack = []
prev_time = 0
for log in logs:
fn, typ, time = log.split(':')
fn, time = int(fn), int(time)
if typ == 'start':
if stack:
ans[stack[-1]] += time - prev_time
stack.append(fn)
prev_time = time
else:
ans[stack.pop()] += time - prev_time + 1
prev_time = time + 1
return ans
# Time: O(N)
# Space:O(N)
```
#### File: joshlyman/Josh-LeetCode/670_Maximum_Swap.py
```python
class Solution:
def maximumSwap(self, num: int) -> int:
s = list(str(num))
n = len(s)
for i in range(n-1): # find index where s[i] < s[i+1], meaning a chance to flip
if s[i] < s[i+1]:
break
else:
return num # if nothing find, return num
max_idx, max_val = i+1, s[i+1] # keep going right, find the maximum value index
for j in range(i+1, n):
if max_val <= s[j]:
                max_idx, max_val = j, s[j]  # keep the rightmost occurrence of the maximum digit
        for j in range(i, -1, -1):  # going left from i, find the leftmost digit that is less than max_val
if s[j] < max_val:
left_idx = j
s[max_idx], s[left_idx] = s[left_idx], s[max_idx] # swap maximum after i and most left less than max
return int(''.join(s)) # re-create the integer
# Time: O(N)
# Space:O(1)
```
#### File: joshlyman/Josh-LeetCode/680_Valid_Palindrome_II.py
```python
class Solution:
def validPalindrome(self, s: str) -> bool:
# 1. brute force: to remove each single character to see if the resulting is a palindrome, O(n^2)
# 2. greedy approach: if s[i] == s[j] then we take i++, j--, o.w. must be either s[i+1].. s[j] or s[i]..s[j-1], because we can have at most 1 char not to be inside palindrome
left,right = 0,len(s)-1
while left < right:
if s[left] == s[right]:
left+=1
right-=1
else:
subarray1 = s[:left]+s[left+1:]
subarray2 = s[:right]+s[right+1:]
# [::-1] means flip the string, if flip still equal that means palindrome
return subarray1 == subarray1[::-1] or subarray2 == subarray2[::-1]
# if all matches, no need to delete char
return True
# Time: O(N)
# Space: O(N) because of the additional memory used by subarray1 and subarray2
# my solution
class Solution:
def validPalindrome(self, s: str) -> bool:
left,right = 0,len(s)-1
while left< right:
if s[left] == s[right]:
left +=1
right -=1
else:
# skip left or right element to compare remaining elements
return self.helper(s, left +1,right) or self.helper(s,left,right-1)
# must be careful here for those true example when done, for example, aba
return True
def helper(self,s,left,right):
while left< right:
if s[left] == s[right]:
left +=1
right -=1
else:
return False
return True
# Time: O(N)
# Space:O(1)
```
#### File: joshlyman/Josh-LeetCode/703_Kth_Largest_Element_in_a_Stream.py
```python
class KthLargest:
def __init__(self, k: int, nums: List[int]):
self.pool = nums
self.k = k
heapq.heapify(self.pool)
# maintain a min heap which contains only k elements, so kth element will be the smallest element, which is kth largest element
while len(self.pool) > k:
heapq.heappop(self.pool)
def add(self, val):
if len(self.pool) < self.k:
# push item on the heap
heapq.heappush(self.pool, val)
else:
# Push item on the heap, then pop and return the smallest item from the heap.
heapq.heappushpop(self.pool, val)
# 1st one is the top one, smallest one in min heap
return self.pool[0]
# Your KthLargest object will be instantiated and called as such:
# obj = KthLargest(k, nums)
# param_1 = obj.add(val)
# In initialization:
# Time: O(N + (N-K)logN): heapify is O(N), then up to N-K pops of O(logN) each
# Space:O(N)
# In add:
# Time: O(logK) +O(1)
# Space:O(K)
# Heapification of the initial list is O(N) time and O(N) space,
# so initialization is O(N + (N-K)logN) time once the extra elements are popped off.
# For add, it's O(logK) + O(1) time and O(K) space, as the heap size
# is constrained to K (NOT N, where N is the length of the input nums). The O(1) comes
# from reading the top of the heap to return the value, which is constant time.
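# Illustrative example (from the problem statement, not part of this file):
#   kl = KthLargest(3, [4, 5, 8, 2])
#   kl.add(3)   # returns 4
#   kl.add(5)   # returns 5
#   kl.add(10)  # returns 5
#   kl.add(9)   # returns 8
#   kl.add(4)   # returns 8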
```
#### File: joshlyman/Josh-LeetCode/819. Most Common Word.py
```python
class Solution:
def mostCommonWord(self, paragraph: str, banned: List[str]) -> str:
for c in "!?',;.":
paragraph = paragraph.replace(c, " ")
d = {}
res = ""
count = 0
banned_words = set(banned)
for word in paragraph.lower().split():
            if word in banned_words:
continue
elif word in d:
d[word] += 1
else:
d[word] = 1
if d[word] > count:
count = d[word]
res = word
return res
# Time: O(N+M): N be the number of characters in the input string and M be the number of characters in the banned list.
# Space:O(N+M)
```
#### File: joshlyman/Josh-LeetCode/825_Friends_Of_Appropriate_Ages.py
```python
class Solution:
def numFriendRequests(self, ages: List[int]) -> int:
count = [0]*121
for age in ages:
count[age]+=1
ans = 0
for ageA, countA in enumerate(count):
for ageB, countB in enumerate(count):
if ageA * 0.5 + 7 >= ageB:
continue
if ageA < ageB:
continue
if ageA < 100 < ageB:
continue
ans+= countA*countB
if ageA == ageB:
ans -= countA
return ans
# Time: O(A^2+N), where N is the number of people, and A is the number of ages.
# Space:O(A)
```
#### File: joshlyman/Josh-LeetCode/863. All Nodes Distance K in Binary Tree.py
```python
class Solution:
def convert_into_graph(self, node, parent, g):
# To convert into graph we need to know who is the parent
if not node:
return
if parent:
g[node].append(parent)
if node.right:
g[node].append(node.right)
self.convert_into_graph(node.right, node, g)
if node.left:
g[node].append(node.left)
self.convert_into_graph(node.left, node, g)
def distanceK(self, root: TreeNode, target: TreeNode, K: int) -> List[int]:
g = defaultdict(list)
vis, q, res = set(), deque(), []
# We have a graph, now we can use simply BFS to calculate K distance from node.
self.convert_into_graph(root, None, g)
q.append((target, 0))
while q:
n, d = q.popleft()
vis.add(n)
if d == K:
res.append(n.val)
# adjacency list traversal
for nei in g[n]:
if nei not in vis:
q.append((nei, d + 1))
return res
# Time: O(N)
# Space:O(N)
# V2
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def distanceK(self, root: TreeNode, target: TreeNode, K: int) -> List[int]:
# build graph: graph consists of node as key with nei nodes as value
self.g = collections.defaultdict(list)
self.convert_to_graph(root,None)
# build queue to store node waitting to be visited
self.queue = collections.deque()
self.visited = set()
self.res = []
# start from target node
self.queue.append((target,0))
while self.queue:
node,dist = self.queue.popleft()
self.visited.add(node)
if dist == K:
self.res.append(node.val)
for nei in self.g[node]:
if nei not in self.visited:
self.queue.append((nei,dist+1))
return self.res
    # build a graph first, mapping each node to its neighbours (parent and children)
def convert_to_graph(self,node,parent):
if not node:
return
if parent:
self.g[node].append(parent)
if node.right:
self.g[node].append(node.right)
self.convert_to_graph(node.right,node)
if node.left:
self.g[node].append(node.left)
self.convert_to_graph(node.left,node)
```
#### File: joshlyman/Josh-LeetCode/875. Koko Eating Bananas.py
```python
class Solution:
def minEatingSpeed(self, piles: List[int], H: int) -> int:
l, r = 1, max(piles)
while l < r:
m = l + (r-l) // 2
time = sum([math.ceil(i/m) for i in piles])
if time > H:
l = m + 1
else:
r = m
return l
# Time: O(NlogW), where N is the number of piles, and W is the maximum size of a pile.
# Space: O(1)
```
#### File: joshlyman/Josh-LeetCode/896_Monotonic_Array.py
```python
class Solution:
def isMonotonic(self, A: List[int]) -> bool:
# two pass
# check adjacent elements
return all(A[i]<=A[i+1] for i in range(len(A)-1)) or all(A[i]>=A[i+1] for i in range(len(A)-1))
# Time: O(N)
# Space:O(1)
# One pass
class Solution(object):
def isMonotonic(self, A):
increasing = decreasing = True
for i in xrange(len(A) - 1):
if A[i] > A[i+1]:
increasing = False
if A[i] < A[i+1]:
decreasing = False
return increasing or decreasing
# Time: O(N)
# Space:O(1)
```
#### File: joshlyman/Josh-LeetCode/977_Squares_of_a_Sorted_Array.py
```python
class Solution(object):
def sortedSquares(self, A):
return sorted(x*x for x in A)
# Time: O(NlogN)
# Space:O(N)
# 2. Two Pointer
# Since the array A is sorted, loosely speaking it has some negative elements with squares in decreasing order, then
# some non-negative elements with squares in increasing order.
# For example, with [-3, -2, -1, 4, 5, 6], we have the negative part [-3, -2, -1] with squares [9, 4, 1],
# and the positive part [4, 5, 6] with squares [16, 25, 36]. Our strategy is to iterate over the negative part
# in reverse, and the positive part in the forward direction.
# We can use two pointers to read the positive and negative parts of the array - one pointer j in the positive direction,
# and another i in the negative direction.
# Now that we are reading two increasing arrays (the squares of the elements), we can merge these arrays together using a
# two-pointer technique.
class Solution(object):
def sortedSquares(self, A):
N = len(A)
# i, j: negative, positive parts
j = 0
while j < N and A[j] < 0:
j += 1
i = j - 1
ans = []
while 0 <= i and j < N:
if A[i]**2 < A[j]**2:
ans.append(A[i]**2)
i -= 1
else:
ans.append(A[j]**2)
j += 1
while i >= 0:
ans.append(A[i]**2)
i -= 1
while j < N:
ans.append(A[j]**2)
j += 1
return ans
# Time: O(N)
# Space:O(1)
```
#### File: joshlyman/Josh-LeetCode/989_Add_to_Array-Form_of_Integer.py
```python
class Solution:
    def addToArrayForm(self, A: List[int], K: int) -> List[int]:
        for i in range(len(A) - 1, -1, -1):
            K, A[i] = divmod(A[i] + K, 10)
        return [int(i) for i in str(K)] + A if K else A
# Time: O(N)
# Space:O(1)
# mine
class Solution:
def addToArrayForm(self, A: List[int], K: int) -> List[int]:
A[-1]+=K
for i in range(len(A) - 1, -1, -1):
carry= A[i]//10
A[i] = A[i]%10
if i>0:
A[i-1]+= carry
if carry:
return [int(i) for i in str(carry)]+A
else:
return A
# Time: O(N)
# Space:O(1)
# V2
class Solution(object):
def addToArrayForm(self, A, K):
A[-1] += K
for i in xrange(len(A) - 1, -1, -1):
carry, A[i] = divmod(A[i], 10)
if i: A[i-1] += carry
if carry:
A = map(int, str(carry)) + A
return A
``` |
{
"source": "joshlyman/ROIShapeAnalysis",
"score": 3
} |
#### File: joshlyman/ROIShapeAnalysis/ROI_ShapeAnalysis_92.py
```python
from pylab import *
import SimpleITK
from matplotlib import pyplot as plt
import numpy as np
import csv
import os
import fnmatch
import math
filename = '/Users/yanzhexu/Google Drive/Marley Grant Data/Marley Grant Data'
outcsv = '/Users/yanzhexu/Desktop/Research/ROI_ShapeDescriptors_92.csv'
#title = ['dicom file','compactness','entropy', 'bending energy','ratio(min/max)','perimeter','area','normalized_radius','min of radial length','max of radial length']
title = ['patient ID','phase','compactness','entropy','bending energy','ratio(min/max)']
# start to calculate area of contour, use green theory
def area(vs):
a = 0
x0,y0 = vs[0]
for [x1,y1] in vs[1:]:
dx = x1-x0
dy = y1-y0
a += 0.5*(y0*dx - x0*dy)
x0 = x1
y0 = y1
return a
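# Quick sanity check (illustrative, not in the original script): for the closed square
# [(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)] area() returns 1.0; the sign depends on the
# traversal direction, which is why np.abs() is applied to the result further below.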
#start to use green theory to get centroid of contour
def centroid_for_polygon(polygon,contourarea):
imax = len(polygon)-1
cx = 0
cy = 0
for i in range(0,imax):
cx += (polygon[i][0] + polygon[i+1][0]) * ((polygon[i][0] * polygon[i+1][1]) - (polygon[i+1][0] * polygon[i][1]))
cy += (polygon[i][1] + polygon[i+1][1]) * ((polygon[i][0] * polygon[i+1][1]) - (polygon[i+1][0] * polygon[i][1]))
cx += (polygon[imax][0] + polygon[0][0]) * ((polygon[imax][0] * polygon[0][1]) - (polygon[0][0] * polygon[imax][1]))
cy += (polygon[imax][1] + polygon[0][1]) * ((polygon[imax][0] * polygon[0][1]) - (polygon[0][0] * polygon[imax][1]))
cx /= (contourarea * 6.0)
cy /= (contourarea * 6.0)
Coorcentroid = (cx,cy)
return Coorcentroid
# find perimeter of contour
def find_perimeter(v):
imax = len(v) - 1
perimeter = 0
for i in range(0, imax):
perimeter += ((v[i][0] - v[i + 1][0]) ** 2 + (v[i][1] - v[i + 1][1]) ** 2) ** 0.5
return perimeter
# find radial length
def find_radial_length(sample, center):
radlen = list()
smax = len(sample)
for i in range(0, smax):
samplen = ((sample[i][0] - center[0]) ** 2 + (sample[i][1] - center[1]) ** 2) ** 0.5
radlen.append(samplen)
minradial = min(radlen)
maxradial = max(radlen)
return radlen, minradial, maxradial
# find radius
def find_radius(v, center):
imax = len(v)
radiussum = 0
radius_list = list()
for i in range(0, imax):
radius = ((v[i][0] - center[0]) ** 2 + (v[i][1] - center[1]) ** 2) ** 0.5
radius_list.append(radius)
radiussum += radius
radius = float(radiussum) / float(imax)
maxradius = max(radius_list)
normalized_radius = float(radius) / float(maxradius)
return normalized_radius, maxradius
# find curvature
def get_curvature(x, y):
curvlist = list()
dx = np.array(np.gradient(x))
dy = np.array(np.gradient(y))
d2x_dt2 = np.gradient(dx)
d2y_dt2 = np.gradient(dy)
for i in range(len(dx)):
divi = (dx[i] * dx[i] + dy[i] * dy[i]) ** 1.5
if divi != 0:
curvature = np.abs(d2x_dt2[i] * dy[i] - dx[i] * d2y_dt2[i]) / divi
curvature = curvature**2
curvlist.append(curvature)
else:
curvature = np.abs(d2x_dt2[i] * dy[i] - dx[i] * d2y_dt2[i])
curvature = curvature**2
curvlist.append(curvature)
return curvlist
# find bending energy
def BendingEnergy(sample,samplex,sampley):
imax = len(sample)
curvature = get_curvature(samplex,sampley)
sumcurv = sum(curvature)
be = float(sumcurv)/float(imax)
return curvature,be
num = 0
casenum = 0
with open(outcsv, 'wb') as CSVFile:
descriptorWriter = csv.writer(CSVFile, dialect='excel')
descriptorWriter.writerow(title)
for casefile in os.listdir(filename):
if casefile.startswith('.'):
continue
if casefile.startswith('..'):
continue
if fnmatch.fnmatch(casefile, '*Icon*'):
continue
# print casefile
filename2 = os.path.join(filename, casefile)
for phasefolder in os.listdir(filename2):
if phasefolder.startswith('.'):
continue
if phasefolder.startswith('..'):
continue
if fnmatch.fnmatch(phasefolder, '*Icon*'):
continue
if fnmatch.fnmatch(phasefolder, '*roi*'):
continue
filename3 = os.path.join(filename2, phasefolder)
for coorfile in os.listdir(filename3):
if coorfile.startswith('.'):
continue
if coorfile.startswith('..'):
continue
if fnmatch.fnmatch(coorfile, '*Icon*'):
continue
if fnmatch.fnmatch(coorfile, '*largest_rec*csv'):
continue
if fnmatch.fnmatch(coorfile, '*texture*'):
continue
if not fnmatch.fnmatch(coorfile, '*CC*csv'):
if not fnmatch.fnmatch(coorfile, '*MLO*csv'):
if not fnmatch.fnmatch(coorfile, '*LM*csv'):
continue
filename4 = os.path.join(filename3, coorfile)
if coorfile is None:
                    print 'Lost coordinate CSV file'
# print coorfile
# dont have too much number of points so only use all vertice points
v = list()
vx = list()
vy = list()
num += 1
with open(filename4, 'r') as contourfile:
contourlist = csv.reader(contourfile)
row1 = next(contourlist)
row2 = next(contourlist)
numv = int(row2[14])
for i in range(numv):
columnx = 18 + 5 * i # column 19
columny = 19 + 5 * i # column 20
v.append([float(row2[columnx]), float(row2[columny])])
vx.append(float(row2[columnx]))
vy.append(float(row2[columny]))
patientID = casefile.split('-')[0]
if fnmatch.fnmatch(coorfile, '*CC*') is True:
phasename = 'CC'
elif fnmatch.fnmatch(coorfile, '*MLO*') is True:
phasename = 'MLO'
elif fnmatch.fnmatch(coorfile, '*LM*') is True:
phasename = 'LM'
else:
phasename = coorfile.split('.')[0]
# print sample
contourarea = np.abs(area(v))
print patientID
print v
print len(vx)
print 'area:', contourarea
Coorcentroid = centroid_for_polygon(v, contourarea)
# print 'centroid of contour:', Coorcentroid
perimeter = find_perimeter(v)
# print 'perimeter:', perimeter
# find compactness
Compactness = float(perimeter ** 2) / float(contourarea)
# print 'compactness:',Compactness
radlen, minradial, maxradial = find_radial_length(v, Coorcentroid)
# print 'radial length list:',radlen
# print 'minimum of radial length:',minradial
# print 'maximum of radial length:',maxradial
normradius, maxradius = find_radius(v, Coorcentroid)
# print 'normalized radius:',normradius
# print 'maximum radius:',maxradius
# find difference between radial and radius
diff_rad = list()
numcom = 0
for i in range(0, len(v)):
normal_radial = float(radlen[i]) / float(maxradial)
compartworad = abs(normal_radial - normradius)
diff_rad.append(compartworad)
if compartworad <= 0.01:
numcom += 1
# find entropy of lesion contour
p = float(numcom) / float(len(v))
# print p
if p >= 1:
Entropy = -(p * math.log(p))
elif p <= 0:
Entropy = -((1 - p) * math.log(1 - p))
else:
Entropy = -(p * math.log(p) + (1 - p) * math.log(1 - p))
# print 'entropy:',Entropy
# find ratio of minimum to maximum radial length
radialratio = float(minradial) / float(maxradial)
# print 'ratio of minimum to maximum radial length:',radialratio
curvature, bendingenergy = BendingEnergy(v, vx, vy)
# print 'curvature:',curvature
# print 'bending energy:', bendingenergy
numsample = len(v)
shapedescriptors = [patientID,phasename, Compactness, Entropy, bendingenergy, radialratio]
descriptorWriter.writerow(shapedescriptors)
``` |
{
"source": "joshlyman/TextureAnalysis",
"score": 2
} |
#### File: TextureAnalysis/FeatureMaps/CreateFigures.py
```python
import matplotlib.pyplot as plt
import numpy as np
import os
import csv
import SimpleITK
import fnmatch
import xml.etree.ElementTree as ET
dcmfile = '/Users/yanzhexu/Google Drive/Marley Grant Data/CEDM-51/malignant/Pt50/Pt50 - LE - CC.dcm'
xmlfile = '/Users/yanzhexu/Google Drive/Marley Grant Data/CEDM-51/malignant/Pt50/VES_LCC.xml'
def drawplot(contourx1,contourx2,contoury1,contoury2,color,lw):
plt.plot([contourx1, contourx1], [contoury1, contoury2], color,linewidth =lw )
plt.plot([contourx1, contourx2], [contoury1, contoury1], color,linewidth =lw)
plt.plot([contourx1, contourx2], [contoury2, contoury2], color,linewidth =lw)
plt.plot([contourx2, contourx2], [contoury1, contoury2], color,linewidth =lw)
def drawbox(xcoord,ycoord,width,height,color,w):
localx1 = xcoord
localx2 = xcoord + width
localy1 = ycoord
localy2 = ycoord + height
drawplot(localx1, localx2, localy1, localy2, color,w)
# plot xml boundary plot of dicom image
def ParseXMLDrawROI(rootDir,color,width):
tree = ET.parse(rootDir)
root = tree.getroot()
xcoordlist = list()
ycoordlist = list()
for child in root.iter('string'):
if not fnmatch.fnmatch(child.text,'*{*}*'):
continue
xcoords = str(child.text).split(',')[0]
ycoords = str(child.text).split(',')[1]
xc = float(xcoords.split('{')[1])
yc = float(ycoords.split('}')[0].replace(' ',''))
xcoordlist.append(xc)
ycoordlist.append(yc)
xcoordlist.append(xcoordlist[0])
ycoordlist.append(ycoordlist[0])
return xcoordlist,ycoordlist
# plt.plot(xcoordlist,ycoordlist,color,linewidth = width)
# read 2D dicom image
def Read2DImage(fileName, rotateAngle=0):
rawImage = SimpleITK.ReadImage(fileName)
imgArray = SimpleITK.GetArrayFromImage(rawImage)
# Convert 3D Image to 2D
if len(imgArray.shape) == 3:
imgArray = imgArray[0, :, :]
return imgArray
def GrayScaleNormalization(imgArray, imgMax,imgMin):
# try:
imgRange = imgMax - imgMin
imgArray = (imgArray - imgMin) * (255.0 / imgRange)
# transfer to closest int
imgArray = np.rint(imgArray).astype(np.int16)
# except ValueError:
# pass
return imgArray
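# Illustrative check (not in the original file): GrayScaleNormalization(np.array([0., 5., 10.]), 10, 0)
# returns [0, 128, 255]; imgMin maps to 0, imgMax maps to 255, and intermediate values are
# rescaled and rounded to the nearest integer.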
dicomImage = Read2DImage(dcmfile)
# plt.figure(figsize= (18,13))
plt.figure(figsize=(20,15))
xcoordlist, ycoordlist = ParseXMLDrawROI(xmlfile,'r',2)
plt.imshow(dicomImage,cmap='gray')
# xcoord = 141
# ycoord = 2332
# width = 180
# height = 161
#
xlarge = max(xcoordlist)
xsmall = min(xcoordlist)
ylarge = max(ycoordlist)
ysmall = min(ycoordlist)
width = xlarge - xsmall
height = ylarge - ysmall
drawbox(xsmall,ysmall,width,height,'r',2)
# plt.show()
imgpath = '/Users/yanzhexu/Desktop/Research/featureMap/Pt50Figures/'
title = 'Pt50 LE CC ROI Box'
plt.title(title)
plt.savefig(imgpath + title + '.png',bbox_inches='tight')
plt.cla()
plt.close()
# plt.scatter(xlist, ylist, c=featurelist, alpha=0.5, cmap='gray')
# # plt.imshow(np.transpose(z),alpha= 1,cmap= 'gray') # change interpolation: "bilinear"
# # plt.colorbar()
# # plt.imshow(ImageArray,alpha=0.6,cmap='gray')
#
# plt.imshow(ImageArray,alpha = 1, cmap='gray')
# #
# title = 'Pt1 LE CC'
# plt.savefig(imgpath + title + '.png')
# plt.cla()
# plt.close()
# fig, (ax0, ax1) = plt.subplots(ncols=2,
# figsize=(12, 4),
# sharex=True,
# sharey=True,
# subplot_kw={"adjustable": "box-forced"})
# En = entropy(subImage, disk(5))
#
# print En
#
#
# plt.imshow(En, cmap='gray')
# # plt.set_title("Entropy")
# # ax1.axis("off")
# plt.title('')
# plt.tight_layout()
#
```
#### File: TextureAnalysis/Gabor/MaxACRExtendGaborFeatures.py
```python
import numpy as np
from skimage.filters import gabor_kernel
from scipy import ndimage
from scipy.stats import kurtosis
from scipy.stats import skew
def GrayScaleNormalization(imgArray, imgMax,imgMin):
imgRange = imgMax - imgMin
imgArray = (imgArray - imgMin) * (255.0 / imgRange)
# transfer to closest int
imgArray = np.rint(imgArray).astype(np.int16)
return imgArray
def genKernelBank(sigma_range, freq_range, out_kernel_bank):
for i in range(4):
theta = i / 4. * np.pi
kernel_bank_per_theta = []
for sigma in sigma_range:
for freq in freq_range:
kernel = np.real(gabor_kernel(freq, theta=theta, sigma_x=sigma, sigma_y=sigma))
kernel_bank_per_theta.append(kernel)
out_kernel_bank.append(kernel_bank_per_theta)
return out_kernel_bank
def MinMaxSubImageGen(subImage1,subImage2,subImage3,subImage4,height,width):
# initilize min and max image matrix
minImage = np.zeros((height, width))
maxImage = np.zeros((height, width))
# find min / max gray scale in each point of 4 subimage matrix
for yi in range(height):
for xi in range(width):
gl1 = subImage1[yi, xi]
gl2 = subImage2[yi, xi]
gl3 = subImage3[yi, xi]
gl4 = subImage4[yi, xi]
mingl = min(gl1, gl2, gl3, gl4)
maxgl = max(gl1, gl2, gl3, gl4)
# put min/max gray scale in min / max image matrix
minImage[yi, xi] = mingl
maxImage[yi, xi] = maxgl
return minImage,maxImage
def calcFeatures(dicomimg1, dicomimg2, dicomimg3, dicomimg4, xcoord1, ycoord1, xcoord2,ycoord2,xcoord3,ycoord3,
xcoord4,ycoord4, width, height, kernel_bank,maxsubimage,minsubimage):
# xcoord and ycoord start from leftmost coordinate
n_kernel_per_theta = len(kernel_bank[0])
#print n_kernel_per_theta
resultMean = np.zeros((4, n_kernel_per_theta))
resultStd = np.zeros((4, n_kernel_per_theta))
resultKurtosis = np.zeros((4,n_kernel_per_theta))
resultSkewness = np.zeros((4,n_kernel_per_theta))
for theta in range(4):
for i, kernel in enumerate(kernel_bank[theta]):
x_ext_radius = (kernel.shape[0] + 1) / 2
y_ext_radius = (kernel.shape[1] + 1) / 2
subImageGabor1 = dicomimg1[ycoord1 - y_ext_radius:(ycoord1 + height) + y_ext_radius, xcoord1 - x_ext_radius:(xcoord1 + width) + x_ext_radius]
subImageGabor2 = dicomimg2[ycoord2 - y_ext_radius:(ycoord2 + height) + y_ext_radius, xcoord2 - x_ext_radius:(xcoord2 + width) + x_ext_radius]
subImageGabor3 = dicomimg3[ycoord3 - y_ext_radius:(ycoord3 + height) + y_ext_radius, xcoord3 - x_ext_radius:(xcoord3 + width) + x_ext_radius]
subImageGabor4 = dicomimg4[ycoord4 - y_ext_radius:(ycoord4 + height) + y_ext_radius, xcoord4 - x_ext_radius:(xcoord4 + width) + x_ext_radius]
Gaborheight = height + 2 * y_ext_radius
Gaborwidth = width + 2 * x_ext_radius
# get min/ max subimage of extended Gabor subimage
MinGaborSubImage, MaxGaborSubImage = MinMaxSubImageGen(subImageGabor1, subImageGabor2, subImageGabor3, subImageGabor4,Gaborheight, Gaborwidth)
# print xcoord1,ycoord1
# print height,width
# print y_ext_radius,x_ext_radius
# gaborsize = np.shape(MinGaborSubImage)
# print 'gabor size:',gaborsize
roi_in = GrayScaleNormalization(MaxGaborSubImage,maxsubimage,minsubimage)
roi_out = np.zeros(roi_in.shape)
ndimage.filters.convolve(roi_in, kernel, output=roi_out, mode='constant', cval=0.0)
zoom_roi_out = roi_out[y_ext_radius:y_ext_radius + height, x_ext_radius:x_ext_radius+ width]
#zoominsize = np.shape(zoom_roi_out)
#print 'zoom in size:',zoominsize
resultMean[theta][i] = zoom_roi_out.mean()
resultStd[theta][i] = zoom_roi_out.std()
# add Kurtosis and Skewness into Gabor pipeline
outlist = list()
zoom_roi_out_list = zoom_roi_out.tolist()
for smalllist in zoom_roi_out_list:
outlist+=smalllist
resultKurtosis[theta][i] = kurtosis(outlist)
resultSkewness[theta][i] = skew(outlist)
out_mean_vec = np.mean(resultMean, axis=0)
out_std_vec = np.mean(resultStd, axis=0)
out_kurtosis_vec = np.mean(resultKurtosis,axis=0)
out_skew_vec = np.mean(resultSkewness,axis=0)
gaborfeatures = np.column_stack((out_mean_vec,out_std_vec,out_kurtosis_vec,out_skew_vec))
return gaborfeatures
```
#### File: EOR_Andrea/EOR_Andrea_QualityControl/DrawColormaps.py
```python
import scipy.io as sio
import os
import numpy as np
import csv
import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
import fnmatch
import SimpleITK
dir = '/Users/yanzhexu/Dropbox/EOR_ML_PI_Shared Regridded_Data/'
# T1, T2 change here
outDir = '/Users/yanzhexu/Desktop/Research/EOR_Andrea/EOR_PI_featuresMap_from_Andrea/'
def drawColorMap(Tfile,caseoutDir,casefolder,T):
Tmat = sio.loadmat(Tfile)
Tarray = Tmat['u']
Tdim = np.shape(Tarray)
ylistdim = Tdim[0]
xlistdim = Tdim[1]
ttslicenum = Tdim[2]
for si in range(ttslicenum):
plist = list()
xlist = list()
ylist = list()
for xi in range(xlistdim):
for yi in range(ylistdim):
                # MATLAB coordinates are 1-based while Python's are 0-based; the loaded
                # array is 0-indexed here, so 1 is added below to report MATLAB-style coordinates
pvalue = Tarray[yi,xi,si]
# need +1
if pvalue !=0:
ylist.append(yi+1)
xlist.append(xi+1)
plist.append(pvalue)
# plt.figure(figsize=(18, 13))
plt.figure()
cm = plt.cm.get_cmap('jet')
plt.scatter(xlist, ylist, c=plist, vmin=0, vmax=1, cmap=cm)
plt.colorbar()
plt.title(casefolder+' slice '+ str(si+1) + ' '+T+' PI',fontsize=20)
plt.savefig(caseoutDir + '/'+casefolder+' slice'+ str(si+1) + ' '+T+' PI.png')
plt.cla()
plt.close()
def drawTColormaps(dir,outDir,T):
for casefolder in os.listdir(dir):
if fnmatch.fnmatch(casefolder,"*.dropbox*"):
continue
if fnmatch.fnmatch(casefolder,'*.DS_Store*'):
continue
if fnmatch.fnmatch(casefolder,'*Icon*'):
continue
print casefolder
casefolderdir = os.path.join(dir,casefolder)
T1Matname = casefolder + '_fromT1Gd.mat'
T2Matname = casefolder + '_fromT2.mat'
T1filedir = os.path.join(casefolderdir,T1Matname)
T2filedir = os.path.join(casefolderdir,T2Matname)
ToutDir = outDir + '/'+ T + 'PI/'
caseoutDir =os.path.join(ToutDir,casefolder)
if not os.path.exists(caseoutDir):
os.makedirs(caseoutDir)
if T == 'T1':
drawColorMap(T1filedir, caseoutDir, casefolder,T)
else:
drawColorMap(T2filedir, caseoutDir, casefolder,T)
# change T1, T2 here
drawTColormaps(dir,outDir,'T2')
```
#### File: GBM/GBMcolormap/Compare_Results.py
```python
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import os
import csv
import SimpleITK
import fnmatch
comparefilepath = '/Users/yanzhexu/Desktop/Research/Sliding box GBM/ColorMap/Comparion.csv'
imgpath = '/Users/yanzhexu/Desktop/Research/Sliding box GBM/Hu Data Tumor Masks_V3/'
outputfolder = '/Users/yanzhexu/Desktop/Research/Sliding box GBM/ColorMap/Comparsion/'
# ptsnamelist = ['RW','JTy','CE','RGr','SF','RIv','RGl','EB','ET','SH','VBr','CG','JM','MW','NT','PC','RA','SA']
# get biopsy coordinates
# biopsycoordinatefile = GBMslidingWindowBoxMappingCoordinate.getCoordinatefiles(mapDir, coorDir)
# store mapping patients folder name in ptsdict
ptsdict = dict()
for ptsfolder in os.listdir(imgpath):
if ptsfolder.startswith('.'):
continue
if ptsfolder.startswith('..'):
continue
if fnmatch.fnmatch(ptsfolder,'*Icon*'):
continue
if fnmatch.fnmatch(ptsfolder,'*README*'):
continue
if fnmatch.fnmatch(ptsfolder,'*csv'):
continue
if fnmatch.fnmatch(ptsfolder,'*xlsx'):
continue
if fnmatch.fnmatch(ptsfolder,'*Validation*'):
continue
print ptsfolder
patientname = ptsfolder.split('_')[0]
if fnmatch.fnmatch(patientname, "*FSL*"):
newpatientname = patientname.replace("FSL", "")
elif fnmatch.fnmatch(patientname, "*h*"):
newpatientname = patientname.replace("h", "")
else:
newpatientname = patientname
print newpatientname
ptsfolderpath = os.path.join(imgpath,ptsfolder)
for T2matfile in os.listdir(ptsfolderpath):
if T2matfile.startswith('.'):
continue
if T2matfile.startswith('..'):
continue
if fnmatch.fnmatch(T2matfile, '*Icon*'):
continue
if fnmatch.fnmatch(T2matfile,'*T2mask*mat'):
T2mat = T2matfile
print T2mat
T2matfilepath = ptsfolder+'/'+T2mat
ptsdict[newpatientname] = T2matfilepath
print ptsdict
def genptlist(comparefilepath):
with open(comparefilepath, 'r') as predictFile:
predictFile.readline()
rowFile = csv.reader(predictFile, delimiter=',')
ptidlist = list()
slicenolist = list()
xlist = list()
ylist = list()
for row in rowFile:
if row[0] =='':
continue
ptidlist.append(str(row[0]))
            # cast the CSV fields to int so they can be used as array indices below
            xlist.append(int(row[9]))
            ylist.append(int(row[10]))
            slicenolist.append(int(row[8]))
return ptidlist,xlist,ylist,slicenolist
ptidlist,xlist,ylist,slicenolist = genptlist(comparefilepath)
# get plist from Hu data dict
outfile = outputfolder + 'result.csv'
i = 0
pvaluelist = list()
rowtitle = ['patientid','slicenum','X','Y','Prediction','Window_mean_Prediction','Window_std_Prediction']
with open(outfile, 'wb') as featureCSVFile:
featureWriter = csv.writer(featureCSVFile, dialect='excel')
featureWriter.writerow(rowtitle)
for pt in ptidlist:
rowlist = list()
# get biopsy sample coordinates
x = xlist[i]
y = ylist[i]
slicenum = slicenolist[i]
print pt
# get biopsy folder path
ptsfolder = ptsdict[pt]
print ptsfolder
# get biopsy mat file and predictions matrix
T2matfile = os.path.join(imgpath, ptsfolder)
T2mat = sio.loadmat(T2matfile)
T2array = T2mat['u']
# get biopsy P value
pvalue = T2array[y,x,slicenum]
pvaluelist.append(pvalue)
print slicenum
print x,y
print 'prediction:',pvalue
xstr = str(x)
ystr = str(y)
slicenumstr = str(slicenum)
pstr = str(pvalue)
rowlist.append(pt)
rowlist.append(slicenumstr)
rowlist.append(xstr)
rowlist.append(ystr)
rowlist.append(pstr)
# start to get average sliding window pvalue and std of all pixels
pivaluelist = list()
for xi in range(int(x)-4,int(x)+4):
for yi in range(int(y)-4,int(y)+4):
pivalue = T2array[yi,xi,slicenum]
pivaluelist.append(pivalue)
print 'number of pixels:',len(pivaluelist)
print'\n'
# get mean and std of window's all pixels prediction value
meanwindowpvalue = np.mean(pivaluelist)
stdwindowpvalue = np.std(pivaluelist)
rowlist.append(str(meanwindowpvalue))
rowlist.append(str(stdwindowpvalue))
featureWriter.writerow(rowlist)
i+=1
#print pvaluelist
```
#### File: GBM/GBMcolormap/InterSliceColor.py
```python
import matplotlib.pyplot as plt
import numpy as np
import os
import csv
import SimpleITK
import fnmatch
import xml.etree.ElementTree as ET
import re
predfile = '/Users/yanzhexu/Desktop/Research/Sliding box GBM/ColorMap/CE22_0413/tumorContent_slice22_PDGFRA_alpha45000.csv'
outputMapDir = '/Users/yanzhexu/Desktop/Research/Sliding box GBM/ColorMap/CE22_0413/'
outputfile = 'NewPDGFRA.csv'
def generatexyp(predictionfile):
predfile = predictionfile
i = 0
with open(predfile, 'r') as predictFile:
rowFile = csv.reader(predictFile, delimiter=',')
xlist = list()
ylist = list()
plist = list()
for row in rowFile:
i+=1
xlist.append(int(row[0]))
ylist.append(int(row[1]))
plist.append(float(row[2]))
# print i
return xlist,ylist,plist
xlist, ylist, plist = generatexyp(predfile)
newxlist = list()
newylist = list()
newplist = list()
for i in range(len(ylist)):
cx = xlist[i]
cy = ylist[i]
cp = plist[i]
if i == len(ylist)-1:
newxlist.append(cx)
newylist.append(cy)
newplist.append(cp)
else:
ax = xlist[i+1]
ay = ylist[i + 1]
ap = plist[i+1]
# print 'why1'
if cy !=ay:
# print 'why2'
newxlist.append(cx)
newylist.append(cy)
newplist.append(cp)
else:
# print 'why3'
newxlist.append(cx)
newylist.append(cy)
newplist.append(cp)
if ap > cp:
# print 'why4'
diff = ap - cp
nop = ax - cx
intediff = diff/nop
for ii in range(nop-1):
interx = cx + (ii + 1)
interp = cp + (ii + 1)*intediff
intery = cy
# print interx
# print interp
# print intery
newxlist.append(interx)
newylist.append(intery)
newplist.append(interp)
else:
# print 'why5'
diff = cp - ap
nop = ax - cx
intediff = diff / nop
for ii in range(nop-1):
interx = cx + (ii + 1)
interp = cp - (ii + 1) * intediff
intery = cy
# print interx
# print interp
# print intery
newxlist.append(interx)
newylist.append(intery)
newplist.append(interp)
# print newxlist
# print newylist
# print newplist
outputpath = outputMapDir + outputfile
with open(outputpath, 'wb') as csvoutput:
csvwriter = csv.writer(csvoutput, dialect='excel')
i = 0
for ei in range(len(newxlist)):
newx = newxlist[ei]
newy = newylist[ei]
newp = newplist[ei]
row = [str(newx),str(newy),str(newp)]
csvwriter.writerow(row)
i+=1
print i
```
#### File: GBM/GBMcolormap/SlidingWindowColorMap.py
```python
import matplotlib.pyplot as plt
import numpy as np
import os
import csv
import SimpleITK
import fnmatch
import xml.etree.ElementTree as ET
from matplotlib import cm
rootDir = '/Users/yanzhexu/Dropbox/Prediction/VBr_slice18_T2_ROI_Texture_Map_PredictY6.csv'
dcmDir = '/Users/yanzhexu/Desktop/Research/GBM/aCGH_whole_tumor_maps_for_Neuro-Onc_dataset/VBrFSL_slices_only/slice18/'
imgpath = '/Users/yanzhexu/Desktop/Research/Sliding box GBM/ColorMap/'
with open(rootDir, 'r') as roiFile:
roiFile.readline()
rowFile = csv.reader(roiFile, delimiter=',')
xlist = list()
ylist = list()
p2list = list()
for row in rowFile:
xlist.append(int(row[0]))
ylist.append(int(row[1]))
p2list.append(float(row[6]))
def ParseXMLDrawROI(rootDir,color,width):
tree = ET.parse(rootDir)
root = tree.getroot()
xcoordlist = list()
ycoordlist = list()
for child in root.iter('string'):
if not fnmatch.fnmatch(child.text,'*{*}*'):
continue
xcoords = str(child.text).split(',')[0]
ycoords = str(child.text).split(',')[1]
xc = float(xcoords.split('{')[1])
yc = float(ycoords.split('}')[0].replace(' ',''))
xcoordlist.append(xc)
ycoordlist.append(yc)
xcoordlist.append(xcoordlist[0])
ycoordlist.append(ycoordlist[0])
plt.plot(xcoordlist,ycoordlist,color,linewidth = width )
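# Each <string> element in the ROI XML is assumed to look like '{123.45, 67.89}';
# the braces and comma are stripped above to recover the x/y coordinates.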
def Read2DImage(fileName, rotateAngle=0):
rawImage = SimpleITK.ReadImage(fileName)
imgArray = SimpleITK.GetArrayFromImage(rawImage)
# Convert 3D Image to 2D
if len(imgArray.shape) == 3:
imgArray = imgArray[0, :, :]
return imgArray
dcmfile = 'Ax_T2_FSE_IM-0001-0018.dcm'
dcmfilepath = os.path.join(dcmDir,dcmfile)
dicomImage = Read2DImage(dcmfilepath)
T2xml = 'VB_ROI_Ax T2 FSE INTER (registered) (RESEARCH).xml'
T2xmlpath = os.path.join(dcmDir,T2xml)
T1xml = 'VB_ROI_+C_Ax_T1_MP_SPGR_IM-0004-0018.xml'
T1xmlpath = os.path.join(dcmDir,T1xml)
plt.figure(figsize= (18,13))
#plt.figure()
plt.scatter(xlist, ylist, c=p2list,vmin = 0, vmax =1)
ParseXMLDrawROI(T2xmlpath,'b',0.5)
ParseXMLDrawROI(T1xmlpath,'m.-',3)
plt.colorbar()
plt.imshow(dicomImage,cmap='gray')
plt.title('VBr slice18 T2_Y6')
plt.show()
#plt.savefig(imgpath + 'VBr slice18 T2 Y6.png')
plt.cla()
plt.close()
```
#### File: TextureAnalysis/GBM/GBMSlidingBox.py
```python
import csv
import fnmatch
import os
import SimpleITK
import numpy
from mahotas.features.texture import haralick_labels
from GLCM import GLCMFeatures
from Gabor import ExtendGaborFeatures
from LBP import ExtendLBPFeatures
def Read2DImage(fileName, rotateAngle=0):
rawImage = SimpleITK.ReadImage(fileName)
imgArray = SimpleITK.GetArrayFromImage(rawImage)
# Convert 3D Image to 2D
if len(imgArray.shape) == 3:
imgArray = imgArray[0, :, :]
return imgArray
def GrayScaleNormalization(imgArray, imgMax,imgMin):
imgRange = imgMax - imgMin
imgArray = (imgArray - imgMin) * (255.0 / imgRange)
# transfer to closest int
imgArray = numpy.rint(imgArray).astype(numpy.int16)
return imgArray
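# Quick sketch of the rescaling above (illustrative values, not from the data):
# with imgMin = 0 and imgMax = 510, a raw value of 256 maps to
# rint(256 * 255.0 / 510) = 128 on the 0--255 scale.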
rootDir = '/Users/yanzhexu/Desktop/Research/GBM/T1 and T2 texture Maps - grid spacing 4/JTyFSL_slice15_T1 ROI_texture_map.csv'
# rootDir = '/Users/yanzhexu/Desktop/Research/GBM/T1 and T2 texture Maps - grid spacing 4/CEFSL_slice22_T1 ROI_texture_map.csv'
#mapfileDir = '/Users/yanzhexu/Desktop/Research/GBM/aCGH_whole_tumor_maps_for_Neuro-Onc_dataset/CEFSL_slices_only/slice22'
mapfileDir = '/Users/yanzhexu/Desktop/Research/GBM/aCGH_whole_tumor_maps_for_Neuro-Onc_dataset/JTyFSL_slices_only/slice15'
outputDir = '/Users/yanzhexu/Desktop/Research/Sliding box GBM/SlidingWindows_Experiments'
#featuresOutFn = 'CEFSL_22_Original.csv'
featuresOutFn = 'JTyFSL_15_Comparison.csv'
GLCMAngleList = ['Avg']
featureTitle = ['image Contrast', 'image file name', 'Y coordinate', 'X coordinate']
for GLCMAngle in GLCMAngleList:
for featureName in haralick_labels[:-1]:
featureTitle.append(featureName + '_' + GLCMAngle)
featuresCSVFn = os.path.join(outputDir, featuresOutFn)
with open(featuresCSVFn, 'wb') as featureCSVFile:
featureWriter = csv.writer(featureCSVFile, dialect='excel')
featureWriter.writerow(featureTitle)
with open(rootDir, 'r') as roiFile:
for i in range(9):
roiFile.readline()
rowFile = csv.reader(roiFile, delimiter=',')
for row in rowFile:
print row
imgContrastname = row[0]
print imgContrastname
imgFilename = row[1]
print imgFilename
ycoord = int(float(row[2]))
print ycoord
xcoord = int(float(row[3]))
print xcoord
aFeature = [imgContrastname, imgFilename, ycoord, xcoord]
dicomfile = os.path.join(mapfileDir,imgFilename)
dicomImage = Read2DImage(dicomfile)
subImage = dicomImage[ycoord - 4:ycoord + 4, xcoord - 4:xcoord + 4]
subImageGLCM = GrayScaleNormalization(subImage, subImage.max(), subImage.min())
# GLCM
glcmFeatures = GLCMFeatures.calcFeatures(subImageGLCM)
for GLCMAngle in GLCMAngleList:
for featureName in haralick_labels[:-1]:
aFeature.append(glcmFeatures[GLCMAngle][featureName])
featureWriter.writerow(aFeature)
```
#### File: TextureAnalysis/GBM/XMLTest.py
```python
import xml.etree.ElementTree as ET
import fnmatch
import matplotlib.pyplot as plt
import numpy as np
import math
import os
#rootDir = '/Users/yanzhexu/Desktop/Research/GBM/aCGH_whole_tumor_maps_for_Neuro-Onc_dataset/CEFSL_slices_only/slice22/ROI for +C_3D_AXIAL_IRSPGR_Fast_IM-0005-0022.xml'
# test if all XML data can plot ROI and check inside pts
rootDir = '/Users/yanzhexu/Desktop/Research/GBM/aCGH_whole_tumor_maps_for_Neuro-Onc_dataset'
#outputDir = '/Users/yanzhexu/Desktop/Research/Sliding box GBM/MyAlgorithm/boundarycheck/'
outputDir = '/Users/yanzhexu/Desktop/Research/Sliding box GBM/MyAlgorithm/ROI_SlidingWindows_Image/'
# check if point is inside ROI boundary or outside boundary
def point_inside_polygon(x,y,poly):
n = len(poly)
inside =False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xinters = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xinters:
inside = not inside
p1x,p1y = p2x,p2y
return inside
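# Hedged example of the even-odd ray-casting test above (the square and points
# are made up for illustration):
# square = [(0, 0), (1, 0), (1, 1), (0, 1)]
# point_inside_polygon(0.5, 0.5, square) # -> True (inside)
# point_inside_polygon(2.0, 2.0, square) # -> False (outside)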
# draw contour rectangle plots
def drawplot(contourx1,contourx2,contoury1,contoury2,color):
plt.plot([contourx1, contourx1], [contoury1, contoury2], color)
plt.plot([contourx1, contourx2], [contoury1, contoury1], color)
plt.plot([contourx1, contourx2], [contoury2, contoury2], color)
plt.plot([contourx2, contourx2], [contoury1, contoury2], color)
# draw whole rectangle plots (get ceil and floor)
def drawSameRangeScalePlot(contourx1,contourx2,contoury1,contoury2, interval,color):
# contoury1 = 40
# contoury2 = 240
# contourx1 = 40
# contourx2 = 240
# ceil: get higher int
# floor: get lower int
# contourx1 = int(math.floor(contourx1))
# contourx2 = int(math.ceil(contourx2))
# contoury1 = int(math.floor(contoury1))
# contoury2 = int(math.ceil(contoury2))
for yinterval in range(contoury1, contoury2, interval):
plt.plot([contourx1, contourx2], [yinterval, yinterval], color)
for xinterval in range(contourx1, contourx2, interval):
plt.plot([xinterval, xinterval], [contoury1, contoury2], color)
# check if coords inside boundary or outside boundary
def chooseinoutcoord(contourx1,contourx2,contoury1,contoury2,xycoord):
# 0: inside boundary, 1: on the boundary, 2: outside boundary
# xyboundarypos0 = list()
# xyboundarypos1 = list()
# xyboundarypos2 = list()
# for each point inside rectangle plot, check if each point inside boundary or outside boundary, inside: True, outside: False
for testx in range(contourx1,contourx2+1):
for testy in range(contoury1,contoury2+1):
# check if point is inside boundary or not
inorout = point_inside_polygon(testx, testy, xycoord)
# get diff between boundary pts and test (x,y) pts
# diffbound = list()
# for boundxy in xycoord:
# boundx = boundxy[0]
# boundy = boundxy[1]
# diff = abs(boundx-testx) + abs(boundy-testy)
# diffbound.append(diff)
# get closest diff between boundary pts and test (x,y)
#realdiff = min(diffbound)
# if inside boundary, then mark point as red
# if real dis between bound pts and pts is larger than specific distance, then it is inside the boundary
#if inorout == True and realdiff > 0.2:
if inorout == True:
plt.plot(testx, testy, 'r+')
# xyboundarypos0.append(list())
# xyboundarypos0[len(xyboundarypos0) - 1].append(testx)
# xyboundarypos0[len(xyboundarypos0) - 1].append(testy)
# if real dis between bound pts and pts is smaller than specific distance, then it is on the boundary
# elif inorout == True and realdiff <0.2:
# plt.plot(testx, testy, 'r+')
#
# x1 = testx - 4
# x2 = testx + 4
# y1 = testy - 4
# y2 = testy + 4
#
# drawplot(x1, x2, y1, y2, 'r')
#
# xyboundarypos1.append(list())
# xyboundarypos1[len(xyboundarypos1) - 1].append(testx)
# xyboundarypos1[len(xyboundarypos1) - 1].append(testy)
# if outside boundary, then mark point as green
else:
#elif inorout == False:
plt.plot(testx, testy, 'g+')
# xyboundarypos2.append(list())
# xyboundarypos2[len(xyboundarypos2) - 1].append(testx)
# xyboundarypos2[len(xyboundarypos2) - 1].append(testy)
#
# draw ROI from coordinates in XML file
def ParseXMLDrawROI(rootDir):
tree = ET.parse(rootDir)
root = tree.getroot()
childnum = 0
xcoordlist = list()
ycoordlist = list()
xycoordlist = list()
for child in root.iter('string'):
if not fnmatch.fnmatch(child.text,'*{*}*'):
continue
childnum+=1
#print child.text
#xycoord = list()
xcoords = str(child.text).split(',')[0]
ycoords = str(child.text).split(',')[1]
xc = float(xcoords.split('{')[1])
yc = float(ycoords.split('}')[0].replace(' ',''))
xcoordlist.append(xc)
ycoordlist.append(yc)
xycoordlist.append(list())
xycoordlist[len(xycoordlist) - 1].append(xc)
xycoordlist[len(xycoordlist) - 1].append(yc)
xcoordlist.append(xcoordlist[0])
ycoordlist.append(ycoordlist[0])
#xycoordlist.append(xycoordlist[0])
# get x/y min/max in coords
xmin = min(xcoordlist)
ymin = min(ycoordlist)
xmax = max(xcoordlist)
ymax = max(ycoordlist)
# draw contour rectangle plot
#drawplot(xmin,xmax,ymin,ymax,'g')
# fix X and Y axises range in plot
plt.xlim(40, 240)
plt.ylim(40, 240)
# draw whole rectangle plot
drawSameRangeScalePlot(40,240,40,240,8,'k')
# ceil: get higher int
# floor: get lower int
xmin = int(math.floor(xmin))
xmax = int(math.ceil(xmax))
ymin = int(math.floor(ymin))
ymax = int(math.ceil(ymax))
# check if coords inside boundary or outside boundary
chooseinoutcoord(xmin,xmax,ymin,ymax,xycoordlist)
# draw boundary plot of ROI
plt.plot(xcoordlist,ycoordlist,'b')
#plt.show()
def checkallXML(rootDir):
for texturemapfile in os.listdir(rootDir):
if texturemapfile.startswith('.'):
continue
if texturemapfile.startswith('..'):
continue
print texturemapfile
patientname = texturemapfile.split('_')[0]
if fnmatch.fnmatch(patientname,"*FSL*"):
newpatientname = patientname.replace("FSL","")
elif fnmatch.fnmatch(patientname,"*h*"):
newpatientname = patientname.replace("h","")
else:
newpatientname = patientname
print newpatientname
slicepathfile = os.path.join(rootDir,texturemapfile)
for slicefile in os.listdir(slicepathfile):
if slicefile.startswith('.'):
continue
if slicefile.startswith('..'):
continue
print slicefile
dcmxmlfilepath = os.path.join(slicepathfile,slicefile)
for xmlfile in os.listdir(dcmxmlfilepath):
if not fnmatch.fnmatch(xmlfile, '*.xml'):
continue
if fnmatch.fnmatch(xmlfile, '*NECROSIS*'):
continue
if fnmatch.fnmatch(xmlfile, '*C*SPGR*') or fnmatch.fnmatch(xmlfile, '*+C*T1*') or fnmatch.fnmatch(
xmlfile, '*T1*+C*'):
T1xmlfile = xmlfile
print T1xmlfile
if fnmatch.fnmatch(xmlfile, '*T2*'):
T2xmlfile = xmlfile
print T2xmlfile
T1xmlfilepath = os.path.join(dcmxmlfilepath,T1xmlfile)
T2xmlfilepath = os.path.join(dcmxmlfilepath,T2xmlfile)
# original image T1
plt.figure()
ParseXMLDrawROI(T1xmlfilepath)
plt.title(newpatientname + ' ' + ' ' + slicefile + ' T1')
plt.savefig(outputDir + newpatientname + ' ' + ' ' + slicefile + ' T1.png')
plt.cla()
plt.close()
# original image T2
plt.figure()
ParseXMLDrawROI(T2xmlfilepath)
plt.title(newpatientname + ' ' + ' ' + slicefile + ' T2')
plt.savefig(outputDir + newpatientname + ' ' + ' ' + slicefile + ' T2.png')
plt.cla()
plt.close()
checkallXML(rootDir)
```
#### File: TextureAnalysis/GUI/ExtendLBP_GUI.py
```python
import numpy
from skimage.feature import local_binary_pattern
def calcFeatures(img, nPoints, radius, method, skipZoomIn = False):
lbp = local_binary_pattern(img, nPoints, radius, method)
if not skipZoomIn:
imgSize = lbp.shape
lbp = lbp[radius:imgSize[0] - radius, radius:imgSize[1] - radius]
# lbpsize = numpy.shape(lbp)
# print lbp
# print 'lbp:',lbpsize
    # use the density option to normalize the histogram (the older normed
    # keyword behaves the same here but has been removed from recent NumPy)
    rawFeatures = numpy.histogram(lbp.ravel(), bins = nPoints + 2, density = True)
# print rawFeatures
# print rawFeatures
# print rawFeatures[0]
return rawFeatures[0]
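# Usage sketch (hedged; `img` is assumed to be a 2D grayscale array): with the
# 'uniform' LBP method, nPoints = 8 and radius = 1,
# calcFeatures(img, 8, 1, 'uniform')
# returns a normalized histogram with nPoints + 2 = 10 bins.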
```
#### File: TextureAnalysis/GUI/GBM_SlidingWindow_GUI_FeaturesColorMap.py
```python
import os
import csv
import fnmatch
import SimpleITK
import matplotlib.pyplot as plt
# read 2D dicom image
def Read2DImage(fileName, rotateAngle=0):
rawImage = SimpleITK.ReadImage(fileName)
imgArray = SimpleITK.GetArrayFromImage(rawImage)
# Convert 3D Image to 2D
if len(imgArray.shape) == 3:
imgArray = imgArray[0, :, :]
return imgArray
def getPatientDicomImage(DicomrootDir,phasename,patient,slicenum):
for patientfolder in os.listdir(DicomrootDir):
if patientfolder.startswith('.'):
continue
if patientfolder.startswith('..'):
continue
if fnmatch.fnmatch(patientfolder,'*'+patient+'*'):
patientfolderpath = os.path.join(DicomrootDir,patientfolder)
for slicefile in os.listdir(patientfolderpath):
if slicefile.startswith('.'):
continue
if slicefile.startswith('..'):
continue
if fnmatch.fnmatch(slicefile,'*'+slicenum+'*'):
print slicefile
dcmfilepath = os.path.join(patientfolderpath, slicefile)
dcmfiledict = dict()
for dcmfile in os.listdir(dcmfilepath):
if dcmfile.startswith('.'):
continue
if fnmatch.fnmatch(dcmfile, '*dcm*') is False:
continue
if fnmatch.fnmatch(dcmfile, '*precontrast*'):
continue
if fnmatch.fnmatch(dcmfile, '*C*SPGR*') or fnmatch.fnmatch(dcmfile, '*+C*T1*') or fnmatch.fnmatch(
dcmfile, '*T1*+C*'):
SPGRCfile = dcmfile
dcmfiledict['SPGRC'] = SPGRCfile
if fnmatch.fnmatch(dcmfile, '*T2*'):
T2file = dcmfile
dcmfiledict['T2'] = T2file
if fnmatch.fnmatch(dcmfile, '*q*'):
Qfile = dcmfile
dcmfiledict['Q'] = Qfile
if fnmatch.fnmatch(dcmfile, '*p*'):
Pfile = dcmfile
dcmfiledict['P'] = Pfile
if fnmatch.fnmatch(dcmfile, '*rCBV*'):
RCBVfile = dcmfile
dcmfiledict['RCBV'] = RCBVfile
if fnmatch.fnmatch(dcmfile, '*EPI*+C*') or fnmatch.fnmatch(dcmfile, '*+C*EPI*'):
EPIfile = dcmfile
dcmfiledict['EPI'] = EPIfile
DicomImagefilpath = os.path.join(dcmfilepath,dcmfiledict[phasename])
return DicomImagefilpath
def getfeaturesfile(featuresrootDir,phasename,patient,slicenum):
for featuresfile in os.listdir(featuresrootDir):
if featuresfile.startswith('.'):
continue
if featuresfile.startswith('..'):
continue
if fnmatch.fnmatch(featuresfile, '*' + patient + '*' + 'slice'+ slicenum + '*' + phasename + '*csv*'):
# featuresfilepath = os.path.join(featuresrootDir,featuresfile)
return featuresfile
def genFeaturesColorMap(featuresrootDir,DicomrootDir,outputDir,feature,phasename,patient,slicenum):
DicomImage = getPatientDicomImage(DicomrootDir,phasename,patient,slicenum)
featuresfile =getfeaturesfile(featuresrootDir,phasename,patient,slicenum)
featuresfilepath = os.path.join(featuresrootDir,featuresfile)
dicomImage = Read2DImage(DicomImage)
with open(featuresfilepath,'rb') as csvfile:
rowFile = csv.reader(csvfile, delimiter=',')
xlist = list()
ylist = list()
featurelist = list()
featureindex = 0
for row in rowFile:
if row[0] == 'Phase':
featureindex = row.index(feature)
elif row[0] == phasename:
xlist.append(int(row[1]))
ylist.append(int(row[2]))
featurelist.append(float(row[featureindex]))
plt.figure(figsize=(18, 13))
cm = plt.cm.get_cmap('jet')
plt.scatter(xlist, ylist, c=featurelist, cmap=cm)
plt.colorbar()
plt.imshow(dicomImage, cmap='gray')
# plt.title(patient + ' slice' + slicenum + ' T2-based '+ phasename + ' '+ feature + ' FeatureMap', fontsize=30)
plt.savefig(outputDir + ' '+ patient + ' slice' + slicenum + ' T2-based ' + phasename + ' ' + feature + ' FeatureMap.png',bbox_inches='tight')
plt.cla()
plt.close()
featuresrootDir = '/Users/yanzhexu/Desktop/Research/TA GUI/GBM/'
DicomrootDir = '/Users/yanzhexu/Desktop/Research/GBM/aCGH_whole_tumor_maps_for_Neuro-Onc_dataset'
outputDir = '/Users/yanzhexu/Desktop/Research/TA GUI/GBM/'
# parameter:
# 1. featuresrootDir: input path of the features file folder
# 2. DicomrootDir: input path the patients folder
# 3. outputDir: output path to store each sliding window textures file
# 4. feature: which feature to plot feature map
# 4. phasename: modality: 'EPI', 'P', 'Q', 'RCBV', 'SPGRC', 'T2'
# 5. patient: 'CG','EB','ET','SA','SF','SH','VBr','CE','JM','JTy','MW','NT','PC','RA','RGl','RGr','Rlv','RWh'
# 6. slicenum: which slice of patient to choose
genFeaturesColorMap(featuresrootDir,DicomrootDir,outputDir,feature ='Raw Mean',phasename='P',patient='CE',slicenum='22')
```
#### File: TextureAnalysis/RossAlgorithm/GLCMTextureSecret.py
```python
from __future__ import print_function
import numpy as np
import mahotas
import mahotas.features
# perform any required initialization, and add any algorithm-specific fields
# to the output header
def initAlgorithm( hdr ):
hdr[ "GLCM Entropy" ] = "log2"
return
def _getGLCMTestImage():
testImage = np.array( [[0,0,1,1], [0,0,1,1], [0,2,2,2], [2,2,3,3]] )
return testImage
def _getGLCMFeatureNames():
'''
    Return a list of feature names computed from the GLCM.
    13 features are defined. Note:
    - Uniformity is also called Angular 2nd Moment (ASM, used here), and Energy ** 2
- Inverse Difference Moment is also called Homogeneity (used here)
'''
featureNames = [ 'GLCM ASM', 'GLCM Contrast', 'GLCM Correlation', 'GLCM Sum of Squares', 'GLCM Homogeneity', \
'GLCM Sum Avg', 'GLCM Sum Var', 'GLCM Sum Entropy', 'GLCM Entropy', 'GLCM Diff Var', \
'GLCM Diff Entropy', 'GLCM Info Meas Corr 1', 'GLCM Info Meas Corr 2' ]
return featureNames
#
# Calculate GLCM (Haralick) texture features for an image
#
def computeFeatures( image ):
# calculate the GLCM for each of 4 directions, then calculate and return 13 texture
# features for each direction. The 14th Haralick texture feature,
# "Maximal Correlation Coefficient" is not calculated.
# The Mahotas documentation claims this feature is 'unstable' and should not be used...
f = mahotas.features.haralick( image )
# calculate the mean feature value across all 4 directions
fMean = f.mean( 0 )
# calculate the range (peak-to-peak, ptp) of feature values across all 4 directions
fRange = f.ptp( 0 )
# 13 features are returned
# Uniformity is also called Angular 2nd Moment (ASM, used here) , and Energy ** 2
# Inverse Difference Moment is also called Homogeneity (used here)
featureNames = _getGLCMFeatureNames()
# create an empty dictionary to hold the feature name, mean and range values
d = {}
# fill each dictionary entry, with the name (text), followed by that feature's mean and range (a tuple of floats)
for i, name in enumerate( featureNames ):
d[ name + " Mean" ] = fMean[ i ]
d[ name + " Range" ] = fRange[ i ]
return d
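# Usage sketch (hedged): the small test image defined above can be used to
# exercise the feature computation; keys look like 'GLCM Contrast Mean' and
# 'GLCM Contrast Range'.
# feats = computeFeatures(_getGLCMTestImage())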
``` |
{
"source": "joshmaerk/googlesheets_github_connector",
"score": 3
} |
#### File: googlesheets_github_connector/app/export_to_github.py
```python
from github import Github
from datetime import datetime
from config import Config
# First create a Github instance:
def get_repos():
# using an access token
g = Github(Config.ACCESS_TOKEN)
# Then play with your Github objects:
for repo in g.get_user().get_repos():
print(repo.name)
def update_create_file(repo, filename, content, message):
try:
contents = repo.get_contents(filename)
    except Exception:
        contents = None
    # Create the file if it does not exist yet; otherwise update it in place.
    if not contents:
        repo.create_file(path=filename, message=message, content=content, branch="master")
    else:
        repo.update_file(path=filename, message=message, content=content, sha=contents.sha)
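# Usage sketch (hedged; assumes Config.ACCESS_TOKEN and Config.TARGET_REPO are
# set and the token has push rights to the target repository):
# g = Github(Config.ACCESS_TOKEN)
# repo = g.get_user().get_repo(name=Config.TARGET_REPO)
# update_create_file(repo, "Projekte/demo.md", "# Demo", "manual test commit")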
def upload_files(filenames, content):
g = Github(Config.ACCESS_TOKEN)
repo = g.get_user().get_repo(name=Config.TARGET_REPO)
message = "AutoCommit per " + datetime.ctime(datetime.utcnow())
for file, filename in zip(content, filenames):
update_create_file(
repo=repo,
filename="Projekte/" + filename + ".md",
content=file,
message=message)
``` |
{
"source": "joshmaglione/hypigu",
"score": 3
} |
#### File: hypigu/src/GenFunctions.py
```python
from .Database import internal_database as _data
from .Globals import __PRINT as _print
from .Globals import __TIME as _time
from functools import reduce as _reduce
# Return a function that evaluates the Poincare polynomial, in the variable
# `sub`, of the restriction of L to a given flat.
def _Poincare_polynomial(L, sub=None):
from sage.all import var
if sub == None:
sub = var('Y')
def poincare(x):
pi = L.restriction(x).Poincare_polynomial()
try:
return pi.subs({pi.variables()[0] : sub})
except AttributeError: # In case pi is a constant.
return pi
return poincare
# The complete solutions for small central arrangements of rank <= 2.
def _small_central(A, style):
from sage.all import var
p = var('q')
t = var('t')
Y = var('Y')
T = var('T')
if A.rank() == 1:
if style == 'Igusa':
return (1 - p**-1)/(1 - p**-1*t)
if style == 'skele':
return (1 + Y)/(1 - T)
# Now we assume the rank == 2.
m = len(A)
if style == 'Igusa':
return (1 - p**-1)*(1 - (m - 1)*p**-1 + (m - 1)*p**-1*t - p**-2*t) / ((1 - p**-1*t)*(1 - p**-2*t**m))
if style == 'skele':
return (1 + m*Y + (m-1)*Y**2 + (m-1 + m*Y + Y**2)*T)/((1 - T)**2)
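# For example, a central rank-2 arrangement of m = 3 lines gives, in the
# 'skele' style, (1 + 3*Y + 2*Y**2 + (2 + 3*Y + Y**2)*T)/(1 - T)**2.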
# The direct version of the universal generating function computation.
def _universal(L, anayltic=False, atom=False):
from sage.all import var
from .LatticeFlats import _subposet
# Set up the potential substitutions for T -- as defined in Maglione--Voll.
if anayltic:
q = var('q')
Y = -q**(-1)
P = L.poset
t_name = lambda x: var("t" + str(x))
if atom:
atoms = P.upper_covers(P.bottom())
def T_data(x):
under_poset = _subposet(P, x, lambda z: P.lower_covers(z))
elts = filter(lambda y: y in under_poset, atoms)
ts = map(t_name, elts)
return _reduce(lambda x, y: x*y, ts, q**(-P.rank(x)))
else:
def T_data(x):
under_poset = _subposet(P, x, lambda z: P.lower_covers(z))
elts = list(under_poset._elements)
elts.remove(P.bottom())
ts = map(t_name, elts)
return _reduce(lambda x, y: x*y, ts, q**(-P.rank(x)))
else:
T_data = lambda x: var("T" + str(x))
Y = var('Y')
T = {x : T_data(x) for x in L.poset._elements}
# Base cases for recursion.
if L.poset.has_top() and L.poset.rank() == 2:
elts = L.proper_part_poset()._elements
merge = lambda x, y: x + (1 + Y)**2*T[y]/(1 - T[y])
one = L.poset.top()
return _reduce(merge, elts, (1 + Y)*(1 + (len(elts) - 1)*Y))/(1-T[one])
    if L.poset.rank() == 1:
        elts = list(L.poset._elements)
        elts.remove(L.poset.bottom())
        merge = lambda x, y: x + (1 + Y)*T[y]/(1 - T[y])
        return _reduce(merge, elts, 1 + len(elts)*Y)
P = L.proper_part_poset()
poincare = _Poincare_polynomial(L, sub=Y)
recurse = lambda M: _universal(M, anayltic=anayltic, atom=atom)
num_dat = lambda x: poincare(x)*T[x]*recurse(L.subarrangement(x))
factors = map(num_dat, P._elements)
HP = _reduce(lambda x, y: x + y, factors, poincare(L.poset.bottom()))
if L.poset.has_top():
HP = HP/(1 - T[L.poset.top()])
return HP
def _Igusa_zeta_function(L, DB=True, verbose=_print):
from sage.all import var
from .Constructors import CoxeterArrangement
from .Braid import BraidArrangementIgusa
from .LatticeFlats import LatticeOfFlats, _Coxeter_poset_data
P = L.poset
q = var('q')
t = var('t')
# Base cases for recursion.
if P.has_top() and P.rank() == 2:
m = len(P) - 2
return (1 - q**-1)*(1 - (m-1)*q**-1 + m*(1 - q**-1)*q**-1*t/(1 - q**-1*t))/(1 - q**-2*t**m)
if P.rank() == 1:
m = len(P) - 1
return 1 - m*q**-1 + m*(1 - q**-1)*q**-1*t/(1 - q**-1*t)
if DB:
zeta = _data.get_gen_func(P, 'Igusa')
if zeta != None:
return zeta
# We check to see if we have a type A braid arrangement.
# We can compute these *extremely* quickly.
if _Coxeter_poset_data()['A']['hyperplanes'](P.rank()) == len(L.atoms()):
if _Coxeter_poset_data()['A']['poset'](P.rank()) == len(P):
B = CoxeterArrangement("A" + str(P.rank()))
if P.is_isomorphic(LatticeOfFlats(B).poset):
return BraidArrangementIgusa(P.rank())
poincare = _Poincare_polynomial(L, sub=-q**(-1))
t_factor = lambda X: t**len(L.flat_labels[X])
x_factor = lambda x: poincare(x)*t_factor(x)*q**(-P.rank(x))
eq_elt_data = L._combinatorial_eq_elts()
factors = map(lambda x: x[1]*x_factor(x[0]), eq_elt_data)
integrals = map(lambda x: _Igusa_zeta_function(x[2], DB=DB), eq_elt_data)
pi = poincare(P.bottom())
zeta = _reduce(lambda x, y: x + y[0]*y[1], zip(factors, integrals), 0) + pi
if P.has_top():
zeta = zeta/(1 - q**(-P.rank())*t**len(L.atoms()))
if DB and P.rank() > 2:
_data.save_gen_func(P, 'Igusa', zeta)
return zeta
def _top_zeta_function_uni(L, DB=True, verbose=_print):
from sage.all import var
P = L.poset
s = var('s')
C = 1*L.poset.has_top()
# Base cases for recursion.
if P.has_top() and P.rank() == 2:
m = len(P) - 2
return (2 + (2 - m)*s)/((2 + m*s)*(1 + s))
if P.rank() == 1:
m = len(P) - 1
return (1 + (1 - m)*s)/(1 + s)
poincare = _Poincare_polynomial(L)
Y = poincare(P.bottom()).variables()[0]
pi_circ = lambda x: (poincare(x)/(1 + Y)**C).factor().simplify().subs({Y: -1})
eq_elt_data = L._combinatorial_eq_elts()
factors = map(lambda x: x[1]*pi_circ(x[0]), eq_elt_data)
integrals = map(lambda x: _top_zeta_function_uni(x[2], DB=DB), eq_elt_data)
pi = pi_circ(P.bottom())
zeta = _reduce(lambda x, y: x + y[0]*y[1], zip(factors, integrals), 0) + pi
if C == 1:
zeta = zeta/(P.rank() + len(L.atoms())*s)
return zeta
def _top_zeta_function_mul(L, DB=True, verbose=_print, atom=False):
from sage.all import var
from .LatticeFlats import _subposet
P = L.poset
C = 1*L.poset.has_top()
s_name = lambda x: var("s" + str(x))
    if atom:
        atoms = P.upper_covers(P.bottom())
        def s_data(x):
            under_poset = _subposet(P, x, lambda z: P.lower_covers(z))
            elts = filter(lambda y: y in under_poset, atoms)
            ts = map(s_name, elts)
            return _reduce(lambda x, y: x + y, ts, 0)
    else:
        def s_data(x):
            under_poset = _subposet(P, x, lambda z: P.lower_covers(z))
            elts = list(under_poset._elements)
            elts.remove(P.bottom())
            ts = map(s_name, elts)
            return _reduce(lambda x, y: x + y, ts, 0)
S = {x : s_data(x) for x in P._elements}
# Base cases for recursion.
add_em = lambda x, y: x + y
if P.has_top() and P.rank() == 2:
atms = P.upper_covers(P.bottom())
m = len(atms)
elt_dat = lambda x: 1/(1 + S[x])
return _reduce(add_em, map(elt_dat, atms), 2 - m)/(2 + S[P.top()])
if P.rank() == 1:
atms = P.upper_covers(P.bottom())
m = len(atms)
elt_dat = lambda x: 1/(1 + S[x])
return _reduce(add_em, map(elt_dat, atms), 1 - m)
poincare = _Poincare_polynomial(L)
Y = poincare(P.bottom()).variables()[0]
pi_circ = lambda x: (poincare(x)/(1 + Y)**C).factor().simplify().subs({Y: -1})
x_factor = lambda x: pi_circ(x)
prop_elts = L.proper_part_poset()._elements
factors = map(lambda x: x_factor(x), prop_elts)
integrals = map(lambda x: _top_zeta_function_mul(L.subarrangement(x), DB=DB, atom=atom), prop_elts)
pi = pi_circ(P.bottom())
zeta = _reduce(lambda x, y: x + y[0]*y[1], zip(factors, integrals), 0) + pi
if P.has_top():
zeta = zeta/(P.rank() + S[P.top()])
return zeta
def _comb_skele(L, DB=True, verbose=_print):
from sage.all import var
P = L.poset
Y = var('Y')
T = var('T')
if P.has_top():
if P.rank() == 1:
return (1 + Y)/(1 - T)
if P.rank() == 2:
m = len(P) - 2
return (1 + m*Y + (m - 1)*Y**2 + (m - 1 + m*Y + Y**2)*T)/(1 - T)**2
if DB:
if verbose:
print(_time() + "Checking database.")
zeta = _data.get_gen_func(P, 'skele')
if zeta != None:
return zeta
if verbose:
print("\tDone.")
poincare = _Poincare_polynomial(L)
if verbose:
print(_time() + "Gleaning structure from poset.")
eq_elt_data = L._combinatorial_eq_elts()
if verbose:
print("\tDone.")
print(_time() + "Lattice points: {0}, Relevant points: {1}".format(len(P), len(eq_elt_data)))
factors = map(lambda x: x[1]*T*poincare(x[0]), eq_elt_data)
if verbose:
print(_time() + "Recursing...")
integrals = map(lambda x: _comb_skele(x[2], DB=DB), eq_elt_data)
if verbose:
print(_time() + "Putting everything together...")
pi = poincare(P.bottom())
zeta = _reduce(lambda x, y: x + y[0]*y[1], zip(factors, integrals), 0) + pi
if P.has_top():
zeta = zeta/(1 - T)
if DB and P.rank() > 2:
_data.save_gen_func(P, 'skele', zeta)
return zeta
# Given a polynomial f that factors into linear forms, return the hyperplane
# arrangement defined by those factors together with their multiplicities.
def _parse_poly(f):
from sage.all import SR, QQ, HyperplaneArrangements, Matrix
if type(f) == str:
f = SR(f)
if f.base_ring() == SR:
L = f.factor_list()
K = QQ
else:
L = list(f.factor())
K = f.base_ring()
L = filter(lambda T: not T[0] in K, L) # Remove constant factors
F, M = list(zip(*L))
# Verify that each polynomial factor is linear
is_lin = lambda g: all(map(lambda x: g.degree(x) <= 1, g.variables()))
if not all(map(is_lin, F)):
raise ValueError("Expected product of linear factors.")
varbs = f.variables()
varbs_str = tuple(map(lambda x: str(x), varbs))
HH = HyperplaneArrangements(K, varbs_str)
def poly_vec(g):
c = K(g.subs({x : 0 for x in g.variables()}))
return tuple([c] + [K(g.coefficient(x)) for x in varbs])
F_vec = tuple(map(poly_vec, F))
A = HH(Matrix(K, F_vec))
# This scrambles the hyperplanes, so we need to scramble M in the same way.
A_vec = tuple(map(lambda H: tuple(H.coefficients()), A.hyperplanes()))
perm = tuple([F_vec.index(v) for v in A_vec])
M_new = tuple([M[i] for i in perm])
return A, M_new
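# Illustrative sketch (hedged, not from the original source):
# _parse_poly("x*y*(x + y)^2") should return the arrangement with hyperplanes
# x, y, x + y and the multiplicity tuple (1, 1, 2), up to the reordering
# applied above.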
def CoarseFlagHPSeries(A=None, lattice_of_flats=None, int_poset=None, matroid=None, numerator=False, verbose=_print):
from .LatticeFlats import LatticeOfFlats
if matroid == None:
try:
if A.is_central() and A.rank() <= 2:
return _small_central(A, 'skele')
except AttributeError:
raise TypeError("object is not a hyperplane arrangement.")
if lattice_of_flats == None:
if verbose:
print("{0}Building lattice of flats".format(_time()))
if matroid == None:
L = LatticeOfFlats(A, poset=int_poset)
else:
L = LatticeOfFlats(matroid=matroid)
else:
L = lattice_of_flats
if verbose:
print("{0}Computing coarse flag Hilbert--Poincare series".format(_time()))
cfHP = _comb_skele(L)
if numerator:
D = cfHP.numerator_denominator()[1]
T = D.variables()[0]
if D == (T - 1)**(L.poset.rank()):
e = -1
if D == (1 - T)**(L.poset.rank()):
e = 1
return e*(cfHP*D).factor()
else:
return cfHP
def IgusaZetaFunction(X=None, lattice_of_flats=None, int_poset=None, matroid=None, verbose=_print):
from .LatticeFlats import LatticeOfFlats
from sage.all import var
HPA = True
if matroid == None:
try:
# Check if a hyperplane arrangement.
_ = X.hyperplanes()
A = X
except AttributeError:
# Not an HPA; deal with polynomial input.
A, M = _parse_poly(X)
if verbose:
print("{0}Constructed a hyperplane arrangement".format(_time()))
HPA = False
if lattice_of_flats == None:
if verbose:
print("{0}Building lattice of flats".format(_time()))
if matroid == None:
L = LatticeOfFlats(A, poset=int_poset)
else:
L = LatticeOfFlats(matroid=matroid)
else:
L = lattice_of_flats
if not HPA:
if list(M) == [1]*len(M):
if verbose:
print("{0}Computing Igusa's zeta function".format(_time()))
return _Igusa_zeta_function(L)
else:
if verbose:
print("{0}Computing the atom zeta function".format(_time()))
Z = _universal(L, anayltic=True, atom=True)
t = var('t')
SUB = {var('t' + str(k+1)) : t**(M[k]) for k in range(len(M))}
return Z.subs(SUB)
if verbose:
print("{0}Computing Igusa's zeta function".format(_time()))
return _Igusa_zeta_function(L)
def TopologicalZetaFunction(X=None, lattice_of_flats=None, int_poset=None, verbose=_print, multivariate=False, atom=False, matroid=None):
from .LatticeFlats import LatticeOfFlats
from sage.all import var
HPA = True
if matroid == None:
try:
# Check if a hyperplane arrangement.
_ = X.hyperplanes()
A = X
except AttributeError:
# Not an HPA; deal with polynomial input.
A, M = _parse_poly(X)
if verbose:
print("{0}Constructed a hyperplane arrangement".format(_time()))
HPA = False
if lattice_of_flats == None:
if matroid == None:
if verbose:
print("{0}Building lattice of flats".format(_time()))
L = LatticeOfFlats(A, poset=int_poset)
else:
L = LatticeOfFlats(matroid=matroid)
else:
L = lattice_of_flats
if verbose:
print("{0}Computing the topological zeta function".format(_time()))
if not HPA:
if list(M) == [1]*len(M):
return _top_zeta_function_uni(L)
else:
Z = _top_zeta_function_mul(L, atom=True)
s = var('s')
SUB = {var('s' + str(k+1)) : M[k]*s for k in range(len(M))}
return Z.subs(SUB)
if not multivariate:
return _top_zeta_function_uni(L)
return _top_zeta_function_mul(L, atom=atom)
def AnalyticZetaFunction(A=None, lattice_of_flats=None, int_poset=None, matroid=None, verbose=_print):
from .LatticeFlats import LatticeOfFlats
if lattice_of_flats == None:
if matroid == None:
if verbose:
print("{0}Building lattice of flats".format(_time()))
L = LatticeOfFlats(A, poset=int_poset)
else:
L = LatticeOfFlats(matroid=matroid)
else:
L = lattice_of_flats
if verbose:
print("{0}Computing the analytic zeta function".format(_time()))
return _universal(L, anayltic=True)
def AtomZetaFunction(A=None, lattice_of_flats=None, int_poset=None, matroid=None, verbose=_print):
from .LatticeFlats import LatticeOfFlats
if lattice_of_flats == None:
if matroid == None:
if verbose:
print("{0}Building lattice of flats".format(_time()))
L = LatticeOfFlats(A, poset=int_poset)
else:
L = LatticeOfFlats(matroid=matroid)
else:
L = lattice_of_flats
if verbose:
print("{0}Computing the atom zeta function".format(_time()))
return _universal(L, anayltic=True, atom=True)
def FlagHilbertPoincareSeries(A=None, lattice_of_flats=None, int_poset=None, matroid=None, verbose=_print):
from .LatticeFlats import LatticeOfFlats
if lattice_of_flats == None:
if matroid == None:
if verbose:
print("{0}Building lattice of flats".format(_time()))
L = LatticeOfFlats(A, poset=int_poset)
else:
L = LatticeOfFlats(matroid=matroid)
else:
L = lattice_of_flats
if verbose:
print("{0}Computing the flag Hilbert--Poincare series".format(_time()))
return _universal(L)
``` |
{
"source": "joshmaglione/SingularZeta",
"score": 3
} |
#### File: joshmaglione/SingularZeta/__init__.py
```python
__version__ = 1.0
print("Loading...")
# Load the global variables that the user can change.
from src.globalVars import _DEFAULT_INDENT as _indent
from src.globalVars import _DEFAULT_LOAD_DB as _load
from src.globalVars import _DEFAULT_p as _p
from src.globalVars import _DEFAULT_t as _t
from src.globalVars import _DEFAULT_USER_INPUT as _user_input
from src.globalVars import _DEFAULT_VERBOSE as _verbose
if not isinstance(_indent, str):
raise TypeError("Global variable '_DEFAULT_INDENT' must be a string.")
if not isinstance(_load, bool):
raise TypeError("Global variable '_DEFAULT_LOAD_DB' must be set to boolean: True or False.")
if not isinstance(_p, str):
raise TypeError("Global variable '_DEFAULT_p' must be a string.")
if not isinstance(_t, str):
raise TypeError("Global variable '_DEFAULT_t' must be a string.")
if not isinstance(_user_input, bool):
raise TypeError("Global variable '_DEFAULT_USER_INPUT' must be set to boolean: True or False.")
if not isinstance(_verbose, int):
    raise TypeError("Global variable '_DEFAULT_VERBOSE' must be set to an integer.")
# Enables us to turn off printing.
from os import devnull as _DEVNULL
import sys as _sys
class _HiddenPrints:
def __enter__(self):
self._original_stdout = _sys.stdout
_sys.stdout = open(_DEVNULL, 'w')
def __exit__(self, exc_type, exc_val, exc_tb):
_sys.stdout.close()
_sys.stdout = self._original_stdout
# ------------------------------------------------------------------------------
# We front-load some functions so that the initial call after loading the
# SingularZeta is not slow.
# ------------------------------------------------------------------------------
# Start a Singular run
print(_indent + "Loading Singular.")
from sage.all import singular as _singular
_ = _singular.eval("1 + 1;")
# See if Zeta is already imported.
print(_indent + "Loading Zeta.")
try:
Zeta_ver = isinstance(Zeta.__version__, str)
except NameError:
try:
# This just turns off printing because the Zeta banner always comes up.
with _HiddenPrints():
# TODO: Eventually specify what we need.
import Zeta
Zeta_ver = isinstance(Zeta.__version__, str)
except ImportError:
Zeta_ver = False
except:
print(_indent*2 + "Something unexpected went wrong while loading Zeta.")
except:
print(_indent*2 + "Something unexpected went wrong while looking for Zeta.")
# Report what we know
if Zeta_ver:
print(_indent*2 + "Found Zeta version %s." % (Zeta.__version__))
else:
print(_indent*2 + "Could not find Zeta! Most functions unavailable.")
print(_indent*2 + "Zeta url: http://www.maths.nuigalway.ie/~rossmann/Zeta/")
del Zeta_ver
# 'from foo import *' leaves hidden functions hidden and brings it up to
# foo instead of foo.src
print(_indent + "Importing functions.")
from src.atlasClass import *
from src.atlasReport import *
from src.chartClass import *
from src.integrandClass import *
from src.interfaceSingular import *
from src.intLatticeClass import *
from src.localZFTest import *
from src.propertyTests import *
print(_indent + "User defined default settings:")
print(_indent*2 + "Load database: %s" % (_load))
print(_indent*2 + "User input: %s" % (_user_input))
print(_indent*2 + "Variable names: %s" % ([_p, _t]))
print(_indent*2 + "Verbose level: %s" % (_verbose))
print("SingularZeta v%s loaded." % (__version__))
```
#### File: SingularZeta/src/atlasReport.py
```python
from rationalPoints import _guess_polynomial
from globalVars import _DEFAULT_VERBOSE as _verbose
# A useful function for multiple lines
_cat_with_space = lambda x, y: x + "\n" + y
# Get the name of the atlas, which is the last folder of the directory.
def _get_atlas_name(A):
direc = A.directory
b = direc.rindex("/")
if "/" in direc[:b]:
a = direc[:b].rindex("/")
else:
a = -1
return direc[a + 1:b]
# The preamble to the tex document containing all the stuff above
# "\begin{document}."
def _preamble(title="", author=""):
lines = [
"\\documentclass[a4paper]{article}\n",
"\\usepackage{enumerate}",
"\\usepackage{hyperref}",
"\\hypersetup{",
"\tcolorlinks=true,",
"\tlinkcolor=blue,",
"\tfilecolor=blue,",
"\turlcolor=blue,",
"\tcitecolor=blue,",
"}",
"\\usepackage{amsmath}",
"\\usepackage{amsthm}",
"\\usepackage{amssymb}",
"\\usepackage[margin=2cm]{geometry}",
"\\usepackage{mathpazo}",
"\\usepackage{url}",
"\\usepackage[labelformat=simple]{subcaption}",
"\\usepackage{tikz}",
"\\usepackage{pgf}",
"\\usepackage{longtable}",
"\\usepackage{multirow}",
"\\usepackage{graphicx}\n",
"\\allowdisplaybreaks\n",
"\\title{%s}" % (title),
"\\author{%s}" % (author),
"\\date{\\today}"
]
return reduce(_cat_with_space, lines)
# The introduction of the tex document.
def _intro(direc):
sing_zeta = "\\textsf{SingularZeta}"
direc_str = "\\texttt{%s}" % (direc)
Fp = "$\\mathbb{F}_p$"
intro = """
This is a report generated by %s concerning the chart data in the
directory %s. We report all the computations we undertake in computing
the cone integral associated to %s. While this report is being
developed, we provide the table of varieties for which we need to count
the number of %s-rational points. The rows with no entry under %s-points
cannot be done automatically with the current implementation of %s.
Special attention should be given to the ones with no %s-points if such
examples arise.
""" % (
sing_zeta,
direc_str,
direc_str,
Fp,
Fp,
sing_zeta,
Fp
)
return intro.replace(" ", "")
# The introduction of the tex document.
def _intro_integral(direc):
sing_zeta = "\\textsf{SingularZeta}"
direc_str = "\\texttt{%s}" % (direc)
intro = """
This is a report generated by %s concerning the integral data in the
directory %s. We report all the computations we undertake in computing
the cone integral associated to %s. This report should be read as a kind
of ``printout.'' We write down all of the details as if solving this by
hand. We start with the main integral, and then we move to the charts
that correspond to leaves in the blow-up tree of %s. From each of these
charts, we traverse the vertices of the intersection poset of the
divisors. For each vertex, we write down the simplified integral. The
goal is that all of these integrals are written down correctly and are
monomial.
""" % (
sing_zeta,
direc_str,
direc_str,
direc_str
)
return intro.replace(" ", "")
# Given a set of integers, return a latex compatible string for a set of
# integers.
def _set_to_latex(S):
content = reduce(lambda x, y: x + str(y) + ", ", S, "")
return "$\\{" + content[:-2] + "\\}$"
# Format a polynomial to a latex compatible string.
def _format_poly(f):
from sage.all import latex
return "$%s$" % (latex(f))
# Convert the dictionary output by pRationalPorints into latex output.
def _poly_data_to_latex(P):
from sage.all import latex
system = P["simplified_system"]
gens = P["simplified_ring"].gens()
def _format_system(S):
sys_str = map(lambda f: latex(f), S)
poly_sys = reduce(lambda x, y: x + y + ",\\; ", sys_str, "$")
return poly_sys[:-4] + "$"
if len(system) <= 1:
return _format_poly(system[0]), len(gens)
else:
return _format_system(system), len(gens)
# A function that returns the header for the F_p table of non-polynomial point
# counts.
def _Fp_nonpoly_header(exists=False):
Fp = "$\\mathbb{F}_p$"
sing_zeta = "\\textsf{SingularZeta}"
header = """
\\subsection{Varieties with non-polynomial %s-point counts}\n
""" % (Fp)
header.replace(" ", "")
if exists:
header += """
We separate the table of varieties with %s-point count not obviously given by a polynomial. It is possible these varieties are given by a (uniform) polynomial, but %s could not guess this.
""" % (Fp, sing_zeta)
else:
header += """
We guess that all varieties have an %s-point count that is given by a (uniform) polynomial, so we do not have a table for this section.
""" % (Fp)
return header.replace(" ", "")
# A function that returns the header for the F_p table concerning the guesses.
def _Fp_guess_header(exists=False):
Fp = "$\\mathbb{F}_p$"
header = """
\\subsection{Varieties with estimated %s-point counts}\n
""" % (Fp)
header.replace(" ", "")
if exists:
header += """
We include the table of varieties which we could not explicitly determine the polynomials for the number of %s-points. However, we explicitly computed the counts for an overfit set of primes, so we expect these polynomials to be correct for all but finitely many primes.
""" % (Fp)
else:
header += """
We were not able to guess any of the %s-point counts for the varieties in this atlas.
""" % (Fp)
return header.replace(" ", "")
# A function that returns the header for the entire F_p table.
def _Fp_table_header():
Fp = "$\\mathbb{F}_p$"
header = """
\\subsection{The %s-point counts for all varieties}
We write all the varieties for all the monomial cone integrals in one
table.
""" % (Fp)
return header.replace(" ", "")
# A function that returns the header for the entire F_p table.
def _unique_Fp_table_header():
Fp = "$\\mathbb{F}_p$"
header = """
\\subsection{The unique %s-point counts for all varieties}
    By the unique points of a variety, we mean the points contained in that
    variety and in no other variety. We write all the varieties for all the
    monomial cone integrals in one table.
""" % (Fp)
return header.replace(" ", "")
# A function to build the F_p-table of the varieties with non-polynomial
# Fp-point counts as a latex compatible string.
def _build_nonpoly_Fp_table(chrts):
table_top = [
"\\begin{center}",
"\t\\begin{longtable}{|c|c|p{6cm}|c|c|}",
"\t\t\\hline",
"\t\t\\textbf{Chart} & " +
"\\textbf{Vertex} & " +
"\\textbf{Variety} & " +
"\\textbf{Dim.}\\\\ \\hline \\hline"
]
table_end = [
"\t\\end{longtable}",
"\\end{center}"
]
# The main function we apply to all charts.
def extraction(C):
ID = C._id
V = C.intLat.vertices
Fp = C.intLat.pRationalPoints()
data = zip(V, Fp)
def get_info(X):
info = "\t\t%s & %s & " % (ID, _set_to_latex(X[0]))
info += "%s & %s" % (_poly_data_to_latex(X[1][1]))
if not "C" in str(X[1][0]):
return ""
info += " \\\\ \\hline"
return info
chart_section = filter(lambda l: l != "", map(get_info, data))
if len(chart_section) == 0:
return [""]
chart_section[-1] = chart_section[-1] + " \\hline"
return chart_section
# Get all the chart data from extraction, and flatten it down.
table_main = reduce(lambda x, y: x + y, map(extraction, chrts), [])
table = table_top + table_main + table_end
return reduce(_cat_with_space, table)
# A function to build the F_p-table of the varieties where we guessed the
# Fp-point count with polynomials.
def _build_estimate_Fp_table(chrts):
table_top = [
"\\begin{center}",
"\t\\begin{longtable}{|c|c|p{6cm}|c|c|c|}",
"\t\t\\hline",
"\t\t\\textbf{Chart} & " +
"\\textbf{Vertex} & " +
"\\textbf{Variety} & " +
"\\textbf{Dim.} & " +
"\\textbf{Guess}\\\\ \\hline \\hline"
]
table_end = [
"\t\\end{longtable}",
"\\end{center}"
]
# The main function we apply to all charts.
def extraction(C):
ID = C._id
V = C.intLat.vertices
Fp = C.intLat.pRationalPoints()
data = zip(V, Fp)
def get_info(X):
info = "\t\t%s & %s & " % (ID, _set_to_latex(X[0]))
info += "%s & %s & " % (_poly_data_to_latex(X[1][1]))
if "X" in str(X[1][0]):
info += "{\\footnotesize " + _format_poly(X[1][0]) + "}"
else:
return ""
info += " \\\\ \\hline"
return info
chart_section = filter(lambda l: l != "", map(get_info, data))
if len(chart_section) == 0:
return [""]
chart_section[-1] = chart_section[-1] + " \\hline"
return chart_section
# Get all the chart data from extraction, and flatten it down.
table_main = reduce(lambda x, y: x + y, map(extraction, chrts), [])
table = table_top + table_main + table_end
return reduce(_cat_with_space, table)
# A function to build the entire F_p-table as a latex compatible string.
def _build_Fp_table(A):
Fp = "$\\mathbb{F}_p$"
table_top = [
"\\begin{center}",
"\t\\begin{longtable}{|c|c|p{6cm}|c|c|c|}",
"\t\t\\hline",
"\t\t\\textbf{Chart} & " +
"\\textbf{Vertex} & " +
"\\textbf{Variety} & " +
"\\textbf{Dim.}\\ & " +
"%s-\\textbf{points}\\\\ \\hline \\hline" % (Fp)
]
table_end = [
"\t\\end{longtable}",
"\\end{center}"
]
# The main function we apply to all charts.
def extraction(C):
ID = C._id
V = C.intLat.vertices
Fp = C.intLat.pRationalPoints()
data = zip(V, Fp)
def get_info(X):
info = "\t\t%s & %s & " % (ID, _set_to_latex(X[0]))
info += "%s & %s & " % (_poly_data_to_latex(X[1][1]))
if not "C" in str(X[1][0]):
info += "{\\footnotesize " + _format_poly(X[1][0]) + "}"
else:
# is_poly, f = _guess_polynomial(X[1][1]["simplified_ring"], X[1][1]["simplified_system"])
# if is_poly:
# info += "{\\footnotesize " + _format_poly(f) + "}"
# else:
info += "{\\tiny NOT POLYNOMIAL}"
info += " \\\\ \\hline"
return info
chart_section = map(get_info, data)
chart_section[-1] = chart_section[-1] + " \\hline"
return chart_section
# Get all the chart data from extraction, and flatten it down.
table_main = reduce(lambda x, y: x + y, map(extraction, A.charts), [])
# Put everything together and return a string.
table = table_top + table_main + table_end
return reduce(_cat_with_space, table)
# A function to build the entire unique F_p-table as a latex compatible string.
def _build_unique_Fp_table(A):
Fp = "$\\mathbb{F}_p$"
table_top = [
"\\begin{center}",
"\t\\begin{longtable}{|c|c|p{6cm}|c|c|c|}",
"\t\t\\hline",
"\t\t\\textbf{Chart} & " +
"\\textbf{Vertex} & " +
"\\textbf{Variety} & " +
"\\textbf{Dim.}\\ & " +
"%s-\\textbf{points}\\\\ \\hline \\hline" % (Fp)
]
table_end = [
"\t\\end{longtable}",
"\\end{center}"
]
# The main function we apply to all charts.
def extraction(C):
ID = C._id
V = C.intLat.vertices
Fp = C.intLat.pRationalPoints()
Fp = list(map(lambda x: list(x), Fp))
for i in range(len(Fp)):
Fp[i][0] = C.intLat._vertexToPoints[i]
data = zip(V, Fp)
def get_info(X):
info = "\t\t%s & %s & " % (ID, _set_to_latex(X[0]))
info += "%s & %s & " % (_poly_data_to_latex(X[1][1]))
if not "C" in str(X[1][0]):
info += "{\\footnotesize " + _format_poly(X[1][0]) + "}"
else:
# is_poly, f = _guess_polynomial(X[1][1]["simplified_ring"], X[1][1]["simplified_system"])
# if is_poly:
# info += "{\\footnotesize " + _format_poly(f) + "}"
# else:
info += "{\\tiny NOT POLYNOMIAL}"
info += " \\\\ \\hline"
return info
chart_section = map(get_info, data)
chart_section[-1] = chart_section[-1] + " \\hline"
return chart_section
# Get all the chart data from extraction, and flatten it down.
table_main = reduce(lambda x, y: x + y, map(extraction, A.charts), [])
# Put everything together and return a string.
table = table_top + table_main + table_end
return reduce(_cat_with_space, table)
def _cone_cond(C):
from sage.all import latex
first = "\\begin{align*}\n"
last = "\\end{align*}\n"
def one_line(T):
return "v_p(%s) &\leq v_p(%s) \\\\\n" % (latex(T[0]), latex(T[1]))
lines = reduce(lambda x, y: x + y, map(one_line, C), "").replace("\\left", "").replace("\\right", "")
return first + lines + last
def _main_int(A):
from sage.all import latex, gens
I = A.integrand
para = """
The main integral we aim to solve is:
\\begin{equation}
%s \\int_{S} %s \,|\mathrm{d}X|,
\\end{equation}
where $S$ is the subset of $\\mathbb{Z}_p^{%s}$ such that
%s
""" % (latex(I.pFactor().simplify()), I.InsideLatex(), len(gens(A.root.AmbientSpace())), _cone_cond(A.root.cone))
def chart_to_verts(x):
try:
return len(x.intLat.vertices)
except:
return 0
add_up = lambda x, y: x + y
Nverts = reduce(add_up, map(chart_to_verts, A.charts))
next_para = """
We use %s charts, %s of which are leaves. There are a total of %s
integrals to solve.
""" % (A.number_of_charts, len(A.charts), Nverts)
return (para + next_para).replace(" ", "")
def _birationalmap_data(O, N):
from sage.all import latex
first_line = "\\begin{align*}\n"
last_line = "\\end{align*}\n"
V = list(zip(O, N))
n = len(V) // 2 + len(V) % 2
V1 = V[:n]
V2 = V[n:]
mid = ""
for i in range(n):
if i < len(V2):
mid += "%s &\\mapsto %s & %s &\\mapsto %s %s \n" % (latex(V1[i][0]), latex(V1[i][1]), latex(V2[i][0]), latex(V2[i][1]), "\\\\"*(i != n - 1))
else:
mid += "%s &\\mapsto %s & & \n" % (latex(V1[i][0]), latex(V1[i][1]))
return first_line + mid + last_line
def _chart_data(C):
from sage.all import latex
A = C.atlas
old_vars = A.root.birationalMap
new_vars = C.birationalMap
intro = "We apply the following substitution to the initial variables:\n"
biratmap = _birationalmap_data(old_vars, new_vars)
jac = "This substitution yields a Jacobian factor equal to \n\\begin{align*}\n\\left|%s\\right|.\n\\end{align*}\n" % (latex(C.jacDet))
f = reduce(lambda x, y: x + y + ",", map(lambda z: latex(z), C.focus), "")
focus = "The focus is generated by the following:\n\\begin{align*}\n%s.\n\\end{align*}\n" % (f[:-1])
I = C.Integrand()
integral = "The integral simplifies to \n\\begin{equation}\n%s \\int_{S} %s \,|\mathrm{d}X|,\n\\end{equation}\n" % (latex(I.pFactor().simplify()), I.InsideLatex())
cone = "with cone conditions given by:\n" + _cone_cond(C.cone)
return intro + biratmap + jac + focus + integral + cone
def _subchart_section(C):
from sage.all import latex
Subs = C.Subcharts()
P = C.intLat
verts = P.vertices
prod = reduce(lambda x, y: x*y, P.divisors, 1)
subs = ""
for i in range(len(verts)):
subs += "\n\\subsection{Vertex $%s$}\n\n" % (latex(verts[i]).replace("\\left", "").replace("\\right", ""))
if len(verts[i]) < len(P.divisors):
subs += "On this subchart, we assume the following divisors are units:\n"
subs += "\\begin{align*}\n"
subs += reduce(lambda x, y: x + y + " && ", map(lambda j: latex(P.divisors[j]), [j for j in range(len(P.divisors)) if not j in verts[i]]), "")[:-3] + ",\n"
subs += "\\end{align*}\n"
else:
subs += "None of the divisors are considered to be units, "
subs += "and the following divisors are divisible by $p$:\n"
subs += "\\begin{align*}\n"
subs += reduce(lambda x, y: x + y + " && ", map(lambda j: latex(P.divisors[j]), verts[i]), "")[:-3] + ".\n"
subs += "\\end{align*}\n"
if len(verts[i]) > 0:
subs += "We make the following substitutions:\n\\begin{align*}\n"
for j in range(len(verts[i])):
subs += "%s &= pz_{%s}, &" % (latex(P.divisors[sorted(list(verts[i]))[j]]), j + 1)
subs = subs[:-3] + ".\n\\end{align*}\n"
subs += "The number of points in $\\mathbb{F}_p^{%d}$ contained in this subchart and no others is equal to\n$$\n%s.\n$$\n\n" % (len(prod.variables()), latex(P._vertexToPoints[i]))
subs += _chart_data(Subs[i])
return subs
def _chart_section(C, direc=""):
from sage.all import latex
sect_title = "\\section{Chart %s}\n\n" % (C._id)
if C.intLat:
P = C.intLat
if len(P.poset) > 0:
P_plot = P.poset.plot()
p_name = "img/Poset" + C._id + ".png"
P_plot.save(direc + p_name)
pos = "The intersection poset for this chart looks like\n\n\\begin{center}\n\\includegraphics[scale=0.5]{%s}\n\\end{center}\n" % (p_name)
pos += "The vertices of the above poset are labeled by sets of integers. The integers correspond to the following divisors: \n\\begin{enumerate}\n"
pos += reduce(lambda x, y: x + y, map(lambda d: "\\item[%s:] $%s$\n" % (P.divisors.index(d), latex(d)), P.divisors), "")
pos += "\\end{enumerate}\n"
if not P.poset.is_isomorphic(P.DivisorPoset()):
p_name_new = "img/Poset" + C._id + "_new.png"
P_plot_new = P.DivisorPoset().plot()
P_plot_new.save(direc + p_name_new)
pos += "\nThe above poset seems to be \\textbf{incorrect}. Using the same labels, the poset should be as follows\n\n\\begin{center}\n\\includegraphics[scale=0.5]{%s}\n\\end{center}\n" % (p_name_new)
subs = _subchart_section(C)
else:
pos = "The intersection poset is trivial since the integral is already monomial, and so there are no further subdivisions of this chart.\n"
subs = ""
else:
pos = "This chart does not have any data for its intersection poset.\n"
subs = ""
return sect_title + _chart_data(C) + pos + subs
# ==============================================================================
# Main function
# ==============================================================================
# The following is a function that outputs a tex file. The tex file provides
# information concerning the polynomials associated to the atlas A for which we
# cannot automatically determine the number of F_p-rational points.
def RationalReport(A, file=""):
# Take care of input
if not isinstance(file, str):
raise TypeError("Expected 'file' to be a string.")
# Make sure the file string is formatted correctly.
atlas_name = _get_atlas_name(A)
if file == "":
file = atlas_name + "_RationalReport.tex"
if not ".tex" in file:
file_name = file + ".tex"
else:
file_name = file
atlas_name_latex = atlas_name.replace("_", "\\_")
title = "Rational report for %s" % (atlas_name_latex)
with open(file_name, 'w') as tex_file:
tex_file.write(_preamble(title=title))
tex_file.write("\n\n\\begin{document}")
tex_file.write("\n\n\\maketitle")
tex_file.write("\n\\tableofcontents\n\n")
tex_file.write("\n\n\\section{Introduction}\n\n")
tex_file.write(_intro(atlas_name_latex))
with open(file_name, 'a') as tex_file:
tex_file.write("\n\n\\section{Counting $\\mathbb{F}_p$-points}")
# Determine the F_p-rational points of the atlas.
# These are stored with the intersection lattices.
# If these were previously computed, then nothing happens here.
_ = map(lambda C: C.intLat.pRationalPoints(user_input=False), A.charts)
# Get the charts with a vertex with non-polynomial point count.
def nonpoly(C):
Fp_counts = C.intLat.pRationalPoints()
counts = list(map(lambda X: X[0], Fp_counts))
return filter(lambda x: "C" in str(x), counts) != []
nonpoly_chrts = list(filter(nonpoly, A.charts))
if _verbose >= 1:
print("Guessing polynomial Fp-point counts.")
nonpoly_exists = False
guess_exists = False
    # A function we apply to the nonpoly charts. If it guesses a poly it will
    # replace the non-guess with a polynomial in X. The flags dict is shared
    # with the nested function so that it can record what it finds.
    flags = {"nonpoly": nonpoly_exists, "guess": guess_exists}
    def guess_chart(C):
        Fp_points = C.intLat.pRationalPoints()
        def guess_func(data):
            if "C" in str(data[0]):
                is_poly, f = _guess_polynomial(data[1]["simplified_ring"],
                    data[1]["simplified_system"])
                if is_poly:
                    flags["guess"] = True
                    return tuple([f, data[1]])
                flags["nonpoly"] = True
            return data
        C.intLat.p_points = map(guess_func, Fp_points)
        return C
    checked_chrts = map(guess_chart, nonpoly_chrts)
    nonpoly_exists = flags["nonpoly"]
    guess_exists = flags["guess"]
# Build the non-polynomial table
nonpoly_table = _build_nonpoly_Fp_table(checked_chrts)
# Build the estimate table
estimate_table = _build_estimate_Fp_table(checked_chrts)
# Build the entire table
table = _build_Fp_table(A)
# Build the entire unique table
unique_table = _build_unique_Fp_table(A)
with open(file_name, 'a') as tex_file:
tex_file.write("\n\n" + _Fp_nonpoly_header(exists=nonpoly_exists))
tex_file.write("\n\n" + nonpoly_table + "\n\n")
tex_file.write("\n\n" + _Fp_guess_header(exists=guess_exists))
tex_file.write("\n\n" + estimate_table + "\n\n")
tex_file.write("\n\n" + _Fp_table_header())
tex_file.write("\n\n" + table + "\n\n")
tex_file.write("\n\n" + _unique_Fp_table_header())
tex_file.write("\n\n" + unique_table + "\n\n")
with open(file_name, 'a') as tex_file:
tex_file.write("\n\n\\end{document}")
return
# The following is a function that outputs a tex file. The tex file provides
# information concerning the integrals associated to the atlas A.
def IntegralReport(A, direc=""):
# Make sure the file string is formatted correctly.
atlas_name = _get_atlas_name(A)
file_name = direc + atlas_name + "_IntReport.tex"
atlas_name_latex = atlas_name.replace("_", "\\_")
title = "Integral report for %s" % (atlas_name_latex)
with open(file_name, 'w') as tex_file:
tex_file.write(_preamble(title=title))
tex_file.write("\n\n\\begin{document}")
tex_file.write("\n\n\\maketitle")
tex_file.write("\n\\tableofcontents\n\n")
tex_file.write("\n\n\\section{Introduction}\n\n")
tex_file.write(_intro_integral(atlas_name_latex))
with open(file_name, 'a') as tex_file:
tex_file.write("\n\n\\section{Main integral}\n\n")
tex_file.write(_main_int(A))
with open(file_name, 'a') as tex_file:
for C in A.charts:
tex_file.write(_chart_section(C, direc=direc))
with open(file_name, 'a') as tex_file:
tex_file.write("\n\n\\end{document}")
return
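# Illustrative calls only -- here `A` stands for an atlas object built elsewhere in
# this package (not shown in this file), and the file/directory names are placeholders:
#
#     RationalReport(A, file="my_atlas_rational_report")
#     IntegralReport(A, direc="reports/")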
``` |
{
"source": "joshmaglione/ZetaFunctionSandbox",
"score": 3
} |
#### File: ZetaFunctionSandbox/src/ThinReps.py
```python
from sage.all import Matrix as _Matrix
from sage.all import Polyhedron as _Polyhedron
from sage.all import PolynomialRing as _PolynomialRing
from sage.all import QQ as _QQ
from sage.all import var as _var
from sage.rings.integer import Integer as _Sage_int
from Zeta.smurf import SMURF as _Zeta_smurf
from GenFunc import GenFunc as _GenFunc
# Make sure we understand the input to the main functions.
def _input_check(word, leq_char, verbose, variable, sub):
if not isinstance(word, str):
raise TypeError('Expected the word to be a string.')
if len({w for w in word}) > 2:
raise ValueError('Expected the word to use an alphabet of at most 2 letters.')
if not isinstance(leq_char, str):
raise TypeError('Expected leq_char to be a string.')
if not isinstance(verbose, bool):
raise TypeError('Expected "verbose" to be either True or False.')
if not isinstance(variable, str):
raise TypeError('Expected "variable" to be a string.')
pass
# Given a word and a leq_char, construct the matrix whose rows give the
# inequalities for the Polyhedron function in Sage.
def _build_ineqs(word, leq_char, Dynkin="A"):
# Initial values.
n = len(word) + 1
relations = []
zero_vec = tuple([0 for i in range(n + 1)])
# Basic function: add k to the i-th component of v.
def add_k_i(v, i, k):
u = list(v)
u[i] += k
return tuple(u)
# nonnegative relations.
relations += [add_k_i(zero_vec, i, 1) for i in range(1, n + 1)]
# word relations.
if n > 1:
for x in zip(word, range(1, n)):
if x[0] == leq_char:
if Dynkin == "D" and x[1] == n-1:
v = add_k_i(zero_vec, x[1] - 1, -1)
u = add_k_i(v, x[1] + 1, 1)
elif Dynkin == "E" and x[1] == n-1:
v = add_k_i(zero_vec, 3, -1)
u = add_k_i(v, x[1] + 1, 1)
else:
v = add_k_i(zero_vec, x[1], -1)
u = add_k_i(v, x[1] + 1, 1)
else:
if Dynkin == "D" and x[1] == n-1:
v = add_k_i(zero_vec, x[1] - 1, 1)
u = add_k_i(v, x[1] + 1, -1)
elif Dynkin == "E" and x[1] == n-1:
v = add_k_i(zero_vec, 3, 1)
u = add_k_i(v, x[1] + 1, -1)
else:
v = add_k_i(zero_vec, x[1], 1)
u = add_k_i(v, x[1] + 1, -1)
relations.append(u)
return relations
def _eval_relations(relations, verbose, variable, sub):
n = len(relations[0]) - 1
# In case the user wants to verify the matrix.
if verbose:
print("The matrix corresponding to the polyhedral cone:")
print("%s" % (_Matrix(relations)))
# Define the polyhedral cone and corresponding polynomial ring.
P = _Polyhedron(ieqs=relations)
R = _PolynomialRing(_QQ, 'x', n)
# Define substitution.
if sub:
t = _var(variable)
if n > 1:
subs = {_var('x' + str(i)) : t for i in range(n)}
else:
subs = {_var('x') : t}
# Apply Zeta
sm = _Zeta_smurf.from_polyhedron(P, R)
Z = sm.evaluate().subs(subs).factor().simplify()
else:
# Apply Zeta
sm = _Zeta_smurf.from_polyhedron(P, R)
Z = sm.evaluate().factor().simplify()
return Z
def _solve_and_wrap(rels, verb=False, varb='t', sub=True):
Z = _GenFunc(_eval_relations(rels, verb, varb, sub))
if sub:
stand_denom = 1
X = _var(varb)
for k in range(1, len(rels[0])):
stand_denom *= (1 - X**k)
return Z.format(denominator=stand_denom)
else:
return Z
def ThinZeta_An(word, leq_char="0", verbose=False, variable='t', sub=True):
# Make sure we understand the input.
if isinstance(word, int) or isinstance(word, _Sage_int):
word = str(word.binary()[1:])[::-1]
if verbose:
print(word)
_input_check(word, leq_char, verbose, variable, sub)
relations = _build_ineqs(word, leq_char)
return _solve_and_wrap(relations, verb=verbose, varb=variable, sub=sub)
def ThinZeta_Dn(word, leq_char="0", verbose=False, variable='t', sub=True):
# Make sure we understand the input.
if isinstance(word, int) or isinstance(word, _Sage_int):
word = str(word.binary()[1:])[::-1]
if verbose:
print(word)
# If this should be type An instead, we use that function instead.
if len(word) <= 2:
return ThinZeta_An(word, leq_char=leq_char, verbose=verbose, variable=variable, sub=sub)
_input_check(word, leq_char, verbose, variable, sub)
relations = _build_ineqs(word, leq_char, Dynkin="D")
return _solve_and_wrap(relations, verb=verbose, varb=variable, sub=sub)
def ThinZeta_En(word, leq_char="0", verbose=False, variable='t', sub=True):
# Make sure we understand the input.
if isinstance(word, int) or isinstance(word, _Sage_int):
word = str(word.binary()[1:])[::-1]
if verbose:
print(word)
# If this should be type An instead, we use that function instead.
if len(word) <= 4:
return ThinZeta_An(word, leq_char=leq_char, verbose=verbose, variable=variable, sub=sub)
if len(word) > 7:
raise ValueError("Expected at most 7 edges for type E.")
_input_check(word, leq_char, verbose, variable, sub)
relations = _build_ineqs(word, leq_char, Dynkin="E")
return _solve_and_wrap(relations, verb=verbose, varb=variable, sub=sub)
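# Minimal usage sketch (assumes a Sage session with the Zeta package installed; the
# words below are arbitrary examples over the default alphabet {'0', '1'}):
#
#     Z_A = ThinZeta_An("010")     # type A quiver encoded by the word
#     Z_D = ThinZeta_Dn("0100")    # type D; words of length <= 2 fall back to type A
#     Z_E = ThinZeta_En(37)        # Sage integers are first converted to binary words
#     print(Z_A)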
``` |
{
"source": "joshmaker/django-admin-locking",
"score": 2
} |
#### File: django-admin-locking/locking/models.py
```python
from __future__ import absolute_import, unicode_literals, division
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils import timezone
from .settings import DEFAULT_EXPIRATION_SECONDS
__all__ = ('Lock', )
class QueryMixin(object):
def unexpired(self):
return self.filter(date_expires__gte=timezone.now())
class LockingQuerySet(QueryMixin, models.query.QuerySet):
pass
class LockingManager(QueryMixin, models.Manager):
def delete_expired(self):
"""Delete all expired locks from the database"""
self.filter(date_expires__lt=timezone.now()).delete()
def lock_for_user(self, content_type, object_id, user):
"""
Try to create a lock for a user for a given content_type / object id.
If a lock does not exist (or has expired) the current user gains a lock
on this object. If another user already has a valid lock on this object,
then Lock.ObjectLockedError is raised.
"""
try:
lock = self.get(content_type=content_type, object_id=object_id)
except Lock.DoesNotExist:
lock = Lock(content_type=content_type, object_id=object_id, locked_by=user)
else:
if lock.has_expired:
lock.locked_by = user
elif lock.locked_by.id != user.id:
raise Lock.ObjectLockedError('This object is already locked by another user',
lock=lock)
lock.save()
return lock
def force_lock_for_user(self, content_type, object_id, user):
"""Like `lock_for_user` but always succeeds (even if locked by another user)"""
lock, created = self.get_or_create(content_type=content_type,
object_id=object_id,
defaults={'locked_by': user})
if not created or lock.locked_by.pk != user.pk:
lock.locked_by = user
lock.save()
return lock
def lock_object_for_user(self, obj, user):
"""Calls `lock_for_user` on a given object and user."""
ct_type = ContentType.objects.get_for_model(obj)
return self.lock_for_user(content_type=ct_type, object_id=obj.pk, user=user)
def force_lock_object_for_user(self, obj, user):
"""Like `lock_object_for_user` but always succeeds (even if locked by another user)"""
ct_type = ContentType.objects.get_for_model(obj)
return self.force_lock_for_user(content_type=ct_type, object_id=obj.pk, user=user)
def for_object(self, obj):
ct_type = ContentType.objects.get_for_model(obj)
return self.filter(content_type=ct_type, object_id=obj.pk).unexpired()
def get_queryset(self):
return LockingQuerySet(self.model)
class Lock(models.Model):
id = models.CharField(max_length=15, primary_key=True)
locked_by = models.ForeignKey(getattr(settings, 'AUTH_USER_MODEL', 'auth.User'),
on_delete=models.CASCADE)
date_expires = models.DateTimeField()
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
objects = LockingManager()
class Meta:
db_table = getattr(settings, 'LOCKING_DB_TABLE', 'locking_lock')
unique_together = ('content_type', 'object_id', )
permissions = (("can_unlock", "Can remove other user's locks"), )
class ObjectLockedError(Exception):
def __init__(self, message, lock):
self.lock = lock
super(Lock.ObjectLockedError, self).__init__(message)
def save(self, *args, **kwargs):
"Save lock and renew expiration date"
self.id = "%s.%s" % (self.content_type_id, self.object_id)
seconds = getattr(settings, 'LOCKING_EXPIRATION_SECONDS', DEFAULT_EXPIRATION_SECONDS)
self.date_expires = timezone.now() + timezone.timedelta(seconds=seconds)
super(Lock, self).save(*args, **kwargs)
def expire(self, seconds):
"Set lock to expire in `seconds` from now"
self.date_expires = timezone.now() + timezone.timedelta(seconds=seconds)
Lock.objects.filter(pk=self.pk).update(date_expires=self.date_expires)
def to_dict(self):
return {
'locked_by': {
'username': self.locked_by.username,
'first_name': self.locked_by.first_name,
'last_name': self.locked_by.last_name,
'email': self.locked_by.email,
},
'date_expires': self.date_expires,
'app': self.content_type.app_label,
'model': self.content_type.model,
'object_id': self.object_id,
}
@property
def has_expired(self):
return self.date_expires < timezone.now()
@classmethod
def is_locked(cls, obj, for_user=None):
return cls.objects.for_object(obj=obj).exclude(locked_by=for_user).exists()
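# Usage sketch (illustrative only -- assumes a configured Django project, some model
# instance `article`, and a `request.user`):
#
#     try:
#         lock = Lock.objects.lock_object_for_user(article, request.user)
#     except Lock.ObjectLockedError as err:
#         details = err.lock.to_dict()     # who holds the lock and when it expires
#     else:
#         lock.expire(120)                 # renew the lock for two more minutes
#
#     Lock.is_locked(article, for_user=request.user)   # False for the lock holder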
``` |
{
"source": "joshmal9999/Quoridor-Online",
"score": 4
} |
#### File: client/src/coord.py
```python
import pygame
from quoridor.client.src.wall import Wall
class Coord:
"""Create a coord"""
def __init__(self, x, y, win, coords):
self.win = win
self.coords = coords
self.x = x
self.y = y
self.tuple = (x, y)
self.is_occuped = False
# Window attributes
self.top_left = self.make_top_left()
self.middle = self.make_middle()
self.rect = self.make_rect()
# Links
self.north = None
self.east = None
self.south = None
self.west = None
self.wall_east = None
self.wall_south = None
def coord_north(self):
"""Return the coord on the north"""
if self.y - 1 >= 0:
return self.coords.find_coord(self.x, self.y - 1)
return None
def coord_east(self):
"""Return the coord on the east"""
if self.x + 1 <= 8:
return self.coords.find_coord(self.x + 1, self.y)
return None
def coord_south(self):
"""Return the coord on the south"""
if self.y + 1 <= 8:
return self.coords.find_coord(self.x, self.y + 1)
return None
def coord_west(self):
"""Return the coord on the west"""
if self.x - 1 >= 0:
return self.coords.find_coord(self.x - 1, self.y)
return None
def make_top_left(self):
"""Return the top left point of a coord on a window"""
win = self.win
x = ((win.wall_width + win.case_side)*self.x
+ win.wall_width + win.top_left[0])
y = ((win.wall_width + win.case_side)*self.y
+ win.wall_width + win.top_left[1])
return (x, y)
def make_middle(self):
"""Return the middle point of a coord on a window"""
win = self.win
x = ((win.wall_width + win.case_side)*self.x
+ (win.wall_width + win.case_side // 2)
+ win.top_left[0])
y = ((win.wall_width + win.case_side)*self.y
+ (win.wall_width + win.case_side // 2)
+ win.top_left[1])
return (x, y)
def make_rect(self):
"""Return the rectangle of the coord"""
win = self.win
x, y = self.top_left
return (x, y, win.case_side, win.case_side)
def make_wall_east(self):
"""Return the east wall of the coord"""
if self.east is not None and self.y != 8:
return Wall(self, self.east, self.win)
return None
def make_wall_south(self):
"""Return the south wall of the coord"""
if self.south is not None and self.x != 8:
return Wall(self, self.south, self.win)
return None
def link_coord(self):
"""Link the coords"""
self.north = self.coord_north()
self.east = self.coord_east()
self.south = self.coord_south()
self.west = self.coord_west()
def make_walls(self):
"""Make the walls around the coord"""
self.wall_east = self.make_wall_east()
self.wall_south = self.make_wall_south()
def make_cross_walls(self):
"""Make the cross walls of the walls of the coord"""
if self.wall_east is not None:
self.wall_east.make_cross_wall()
if self.wall_south is not None:
self.wall_south.make_cross_wall()
def same_row(self, other):
"""Return True if the two coords are on the same row"""
return self.y == other.y
def same_column(self, other):
"""Return True if the two coords are on the same column"""
return self.x == other.x
def __str__(self):
"""String format of a coord"""
return f"({self.x}, {self.y})"
def __eq__(self, other):
"""Operator == between two coords"""
return self.x == other.x and self.y == other.y
def draw(self, color):
"""Draw the rectangle of a coord"""
pygame.draw.rect(self.win.win, color, self.rect)
class Coords:
"""Manage the coords"""
def __init__(self, win):
self.win = win
self.coords = self.make_coords()
self.link_coords()
self.make_walls()
def make_coords(self):
"""Make coords"""
coords = []
for x in range(9):
for y in range(9):
coords.append(Coord(x, y, self.win, self))
return coords
def link_coords(self):
"""Link coords"""
for c in self.coords:
c.link_coord()
def make_walls(self):
"""Make walls"""
for c in self.coords:
c.make_walls()
for c in self.coords:
c.make_cross_walls()
def find_coord(self, x, y):
"""Find the coord corresponding to x and y"""
return self.coords[x * 9 + y]
def reset(self):
"""Reset coords"""
for c in self.coords:
c.is_occuped = False
```
#### File: client/src/sounds.py
```python
import pygame
class Sounds:
"""Manage sounds"""
def __init__(self, path):
pygame.mixer.init()
self.start_sound = pygame.mixer.Sound(
"".join([path, "/sounds/start_sound.wav"]))
self.winning_sound = pygame.mixer.Sound(
"".join([path, "/sounds/winning_sound.wav"]))
```
#### File: client/src/window.py
```python
import pygame
from quoridor.client.src.colors import Colors
from quoridor.client.src.widgets import Text, Button
from quoridor.client.src.coord import Coords
def pos_in_rect(rect, pos):
"""Return True if pos is in the rectangle"""
pos_x, pos_y = pos
x, y, width, height = rect
return (x <= pos_x <= x + width
and y <= pos_y <= y + height)
class Window:
"""Create the window"""
def __init__(self, width=1000, height=830, case_side=65, wall_width=15,
title="Quoridor Online", bgcolor=Colors.white):
self.width = width
self.height = height
self.case_side = case_side
self.wall_width = wall_width
self.bgcolor = bgcolor
self.top_left = (20, 20)
self.side_board = 9*self.case_side + 10*self.wall_width
self.win = pygame.display.set_mode((width, height))
pygame.display.set_caption(title)
self.button_restart = Button("Restart", self.side_board + 60,
self.side_board - 100, Colors.red,
show=False)
self.button_quit = Button("Quit", self.side_board + 60,
self.side_board - 50, Colors.red)
self.buttons = [self.button_restart, self.button_quit]
self.title = Text("Quoridor", Colors.black, size=50)
self.info = Text("Welcome to Quoridor Online!", Colors.black, size=45)
self.coords = Coords(self)
def update_info(self, text, color=None):
"""Update info text"""
self.info.text = text
if color is not None:
self.info.color = color
def draw_game_board(self, pos):
"""Draw the game board"""
for c in self.coords.coords:
rect = c.rect
if pos_in_rect(rect, pos):
color = Colors.grey_dark
else:
color = Colors.grey
wall_east = c.wall_east
wall_south = c.wall_south
if wall_east and pos_in_rect(wall_east.rect_small, pos):
wall_east.draw(Colors.grey_dark)
elif wall_south and pos_in_rect(wall_south.rect_small, pos):
wall_south.draw(Colors.grey_dark)
c.draw(color)
def draw_finish_lines(self, players):
"""Draw the finish lines with the player's color"""
for p in players.players:
if p.name != '':
if p.orient == "north":
pygame.draw.line(
self.win, p.color,
(self.top_left[0], self.top_left[1] + self.side_board),
(self.top_left[0] + self.side_board,
self.top_left[1] + self.side_board),
self.wall_width)
elif p.orient == "east":
pygame.draw.line(
self.win, p.color,
(self.top_left[0], self.top_left[1]),
(self.top_left[0], self.top_left[1] + self.side_board),
self.wall_width)
elif p.orient == "south":
pygame.draw.line(
self.win, p.color,
(self.top_left[0], self.top_left[1]),
(self.top_left[0] + self.side_board, self.top_left[1]),
self.wall_width)
elif p.orient == "west":
pygame.draw.line(
self.win, p.color,
(self.top_left[0] + self.side_board, self.top_left[1]),
(self.top_left[0] + self.side_board,
self.top_left[1] + self.side_board),
self.wall_width)
def draw_right_panel(self, game, players):
"""Draw the right panel with player's informations"""
x, y = self.side_board + 50, 20
self.title.draw(self.win, (x + 10, y))
for p in players.players:
if p.name != '':
text_p = Text(f"{p.name}: {p.walls_remain} walls", p.color)
text_p.draw(self.win, (x, y + 100*p.num_player + 100))
def draw_buttons(self):
"""Draw buttons"""
for b in self.buttons:
if b.show:
b.draw(self.win)
def redraw_window(self, game, players, walls, pos):
"""Redraw the full window"""
self.win.fill(self.bgcolor)
self.draw_game_board(pos)
self.draw_finish_lines(players)
self.draw_right_panel(game, players)
self.draw_buttons()
players.draw(self)
walls.draw()
self.info.draw(self.win, (self.top_left[0], self.height - 50))
pygame.display.update()
```
#### File: server/src/game.py
```python
class Game:
"""Create a game"""
def __init__(self, game_id, nb_players):
self.game_id = game_id
self.nb_players = nb_players
self.connected = [False] * nb_players
self.names = [''] * nb_players
self.run = False
self.current_player = -1
self.last_play = ''
self.winner = ''
self.wanted_restart = []
def add_player(self, num_player):
"""Add a player"""
self.connected[num_player] = True
print(f"[Game {self.game_id}]: player {num_player} added")
def add_name(self, data):
"""Add a player's name"""
num_player = int(data.split(';')[1])
name = data.split(';')[2]
self.names[num_player] = name
print(f"[Game {self.game_id}]: {name} added as player {num_player}")
def remove_player(self, num_player):
"""Remove a player"""
self.connected[num_player] = False
self.names[num_player] = ''
if self.nb_players_connected() == 1:
self.run = False
self.current_player = -1
else:
if num_player == self.current_player:
self.current_player = self.next_player(self.current_player)
self.last_play = ';'.join(['D', str(num_player)])
print(f"[Game {self.game_id}]: player {num_player} removed")
def nb_players_connected(self):
"""Return the number of players connected"""
return self.connected.count(True)
def players_missing(self):
"""Return the number of players missing to start"""
return self.nb_players - self.nb_players_connected()
def ready(self):
"""Return True if the game can start"""
return self.nb_players_connected() == self.nb_players
def next_player(self, current):
"""Return the new current player"""
current = (current + 1) % self.nb_players
while not self.connected[current]:
current = (current + 1) % self.nb_players
return current
def get_name_current(self):
"""Return the name of the current player"""
return self.names[self.current_player]
def start(self):
"""Start the game"""
self.winner = ''
self.current_player = self.next_player(-1)
self.run = True
print(f"[Game {self.game_id}]: started")
def play(self, data):
"""Get a move"""
print(f"[Game {self.game_id}]: move {data}")
self.last_play = data
if data.split(';')[-1] == 'w':
print(f"[Game {self.game_id}]: {self.get_name_current()} wins!")
self.winner = self.get_name_current()
self.current_player = -1
self.run = False
self.wanted_restart = []
else:
self.current_player = self.next_player(self.current_player)
def restart(self, data):
"""Restart the game if there are enough players"""
num_player = int(data.split(';')[1])
self.wanted_restart.append(num_player)
print(f"[Game {self.game_id}]: {self.names[num_player]} asked restart",
end=' ')
print(f"{len(self.wanted_restart)}/{self.nb_players}")
if len(self.wanted_restart) == self.nb_players:
self.start()
class Games:
"""Manage games"""
def __init__(self, nb_players):
self.games = {}
self.nb_players = nb_players
self.num_player = 0
self.game_id = 0
def find_game(self, game_id):
"""Find a game"""
if game_id in self.games:
return self.games[game_id]
return None
def add_game(self):
"""Create a new game"""
if self.game_id not in self.games:
self.num_player = 0
self.games[self.game_id] = Game(self.game_id, self.nb_players)
print(f"[Game {self.game_id}]: created")
def del_game(self, game_id):
"""Delete a game"""
if game_id in self.games:
del self.games[game_id]
print(f"[Game {game_id}]: closed")
if game_id == self.game_id:
self.num_player = 0
def accept_player(self):
"""Accept a player"""
if self.game_id not in self.games:
self.add_game()
self.games[self.game_id].add_player(self.num_player)
return self.game_id, self.num_player
def launch_game(self):
"""Lauch a game"""
if self.games[self.game_id].ready():
self.games[self.game_id].start()
self.game_id += 1
else:
self.num_player += 1
def remove_player(self, game_id, num_player):
"""Remove a player"""
game = self.find_game(game_id)
if game is not None:
game.remove_player(num_player)
if not game.run:
self.del_game(game_id)
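# Rough sketch of the server-side flow implied by the API above (the message string
# follows the ';'-separated format that add_name() parses; the name is made up):
#
#     games = Games(nb_players=2)
#     game_id, num_player = games.accept_player()   # first client joins game 0
#     games.launch_game()                           # not ready yet, waits for more
#     games.accept_player()                         # second client joins
#     games.launch_game()                           # ready -> Game.start() is called
#     game = games.find_game(game_id)
#     game.add_name("N;0;Alice")                    # register player 0's name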
``` |
{
"source": "Joshmantova/Neural-Style-Transfer",
"score": 2
} |
#### File: Neural-Style-Transfer/src/style_transfer.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from PIL import Image
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
import torchvision.models as models
import copy
import time
class ContentLoss(nn.Module):
def __init__(self, target,):
super(ContentLoss, self).__init__()
self.target = target.detach()
def forward(self, input):
self.loss = F.mse_loss(input, self.target)
return input
class StyleLoss(nn.Module):
def __init__(self, target_feature):
super(StyleLoss, self).__init__()
self.target = gram_matrix(target_feature).detach()
def forward(self, input):
G = gram_matrix(input)
self.loss = F.mse_loss(G, self.target)
return input
class Normalization(nn.Module):
def __init__(self, mean, std):
super(Normalization, self).__init__()
self.mean = torch.tensor(mean).view(-1, 1, 1)
self.std = torch.tensor(std).view(-1, 1, 1)
def forward(self, img):
return (img - self.mean) / self.std
def gram_matrix(input):
a, b, c, d = input.size()
features = input.view(a * b, c * d)
G = torch.mm(features, features.t())
return G.div(a * b * c * d)
def get_input_optimizer(input_img):
optimizer = optim.LBFGS([input_img.requires_grad_()])
return optimizer
def get_style_model_and_losses(cnn, normalization_mean, normalization_std, style_img, content_img,
content_layers=['conv_4'],
style_layers=['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']):
cnn = copy.deepcopy(cnn)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
normalization = Normalization(normalization_mean, normalization_std).to(device)
content_losses = []
style_losses = []
model = nn.Sequential(normalization)
i = 0
for layer in cnn.children():
if isinstance(layer, nn.Conv2d):
i += 1
name = f'conv_{i}'
elif isinstance(layer, nn.ReLU):
name = f"relu_{i}"
layer = nn.ReLU(inplace=False)
elif isinstance(layer, nn.MaxPool2d):
name = f"pool_{i}"
elif isinstance(layer, nn.BatchNorm2d):
name = f"bn_{i}"
else:
raise RuntimeError(f'Unrecognized layer: {layer.__class__.__name__}')
model.add_module(name, layer)
if name in content_layers:
target = model(content_img).detach()
content_loss = ContentLoss(target)
model.add_module(f"content_loss_{i}", content_loss)
content_losses.append(content_loss)
if name in style_layers:
target_feature = model(style_img).detach()
style_loss = StyleLoss(target_feature)
model.add_module(f"style_loss_{i}", style_loss)
style_losses.append(style_loss)
for i in range(len(model) - 1, -1, -1):
if isinstance(model[i], ContentLoss) or isinstance(model[i], StyleLoss):
break
model = model[:(i + 1)]
return model, style_losses, content_losses
def run_style_transfer(cnn, normalization_mean, normalization_std, content_img, style_img,
input_img, num_steps=300, style_weight=1e6, content_weight=1):
print("Building the style transfer model..")
model, style_losses, content_losses = get_style_model_and_losses(cnn, normalization_mean, normalization_std,
style_img, content_img)
optimizer = get_input_optimizer(input_img)
print('Optimizing..')
run = [0]
while run[0] <= num_steps:
def closure():
input_img.data.clamp_(0, 1)
optimizer.zero_grad()
model(input_img)
style_score = 0
content_score = 0
for sl in style_losses:
style_score += sl.loss
for cl in content_losses:
content_score += cl.loss
style_score *= style_weight
content_score *= content_weight
loss = style_score + content_score
loss.backward()
run[0] += 1
if run[0] % 50 == 0:
print(f"run {run}:")
print(f"Style Loss: {style_score.item():4f} Content Loss: {content_score.item():4f}")
print()
return style_score + content_score
optimizer.step(closure)
input_img.data.clamp_(0, 1)
yield input_img, run[0]
# input_img.data.clamp_(0, 1)
# return input_img
def image_loader(image, imsize, device):
loader = transforms.Compose([
transforms.Resize(imsize),
transforms.ToTensor()
])
if type(image) == str:
image = Image.open(image)
image = image.resize((444, 444))
image = loader(image).unsqueeze(0)
return image.to(device, torch.float)
def imshow(tensor):
unloader = transforms.ToPILImage()
image = tensor.cpu().clone()
image = image.squeeze(0)
image = unloader(image)
return image
# plt.imshow(image)
# if title:
# plt.title(title)
# plt.pause(0.001)
if __name__ == "__main__":
start = time.time()
# plt.ion()
plt.ioff()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
imsize = 512 if torch.cuda.is_available() else 128
# imsize = 512
loader = transforms.Compose([
transforms.Resize(imsize),
transforms.ToTensor()
])
style_image_name = 'Alex_Grey_Over_Soul.jpg'
# content_image_name = 'dancing.jpg'
content_image_name = '../../../../downloads/1200px-Eopsaltria_australis_-_Mogo_Campground.jpg'
style_img = image_loader(f'../imgs/{style_image_name}', imsize, device)
content_img = image_loader(content_image_name, imsize, device)
assert style_img.size() == content_img.size()
# plt.figure()
# imshow(style_img, title='Style Image')
# plt.figure()
# imshow(content_img, title='Content Image')
cnn = models.vgg19(pretrained=True).features.to(device).eval() #pretrained on imagenet
cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device)
cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device)
input_img = content_img.clone()
# input_img = torch.randn(content_img.data.size(), device=device)
# plt.figure()
# imshow(input_img, title='Input Image')
# run_style_transfer is a generator that yields (image, step) pairs; keep the last pair.
*_, (output, final_step) = run_style_transfer(cnn, cnn_normalization_mean, cnn_normalization_std,
content_img, style_img, input_img, style_weight=1e4)
end = time.time()
print(f"Time taken to train: {(end-start)/60} minutes")
plt.figure()
plt.imshow(imshow(output.detach()))
plt.title('Output Image')
plt.savefig(f'Styled input image bird + {style_image_name} less style.png')
plt.show()
``` |
{
"source": "joshmanzano/twa-simulator",
"score": 3
} |
#### File: joshmanzano/twa-simulator/twasimulator.py
```python
class Program:
def __init__(self, states):
self.states = states
self.tape = '##'
self.tapePointer = 0
def addLog(self, string):
self.log.append(string)
def process(self, string, startState):
self.log = []
self.tape = '#' + string + '#'
self.currentState = self.states[startState]
self.addLog('Starting at State ' + str(startState))
self.addLog('===')
while(True):
action = self.currentState.action
if(action == 'right'):
self.addLog('Moving tape to the right')
self.tapePointer += 1
logTape = self.tape[:self.tapePointer] + '[' + self.tape[self.tapePointer] + ']' + self.tape[self.tapePointer+1:]
self.addLog(logTape)
elif(action == 'left'):
self.addLog('Moving tape to the left')
if(self.tapePointer > 0):
self.tapePointer -= 1
logTape = self.tape[:self.tapePointer] + '[' + self.tape[self.tapePointer] + ']' + self.tape[self.tapePointer+1:]
self.addLog(logTape)
elif(action == 'accept'):
return {'result': True, 'log': self.log}
elif(action == 'reject'):
return {'result': False, 'log': self.log}
toState = self.currentState.transition(self.tape[self.tapePointer])
self.addLog('Moving from State ' + str(self.currentState.stateNumber) + ' to State ' + str(toState))
self.currentState = self.states[toState]
self.addLog('===')
class State:
def __init__(self, action, stateNumber):
self.action = action
self.stateNumber = stateNumber
self.transitions = {}
def addTransition(self, character, stateNumber):
self.transitions[character] = stateNumber
def transition(self, character):
return self.transitions[character]
import json
def simulate(data):
states = {}
print(data)
for stateData in data['states']:
state = State(stateData['action'], int(stateData['stateNumber']))
for transition in stateData['transitions']:
try:
state.addTransition(transition['character'], int(transition['stateNumber']))
except:
state.addTransition(transition['character'], transition['stateNumber'])
states[int(stateData['stateNumber'])] = state
program = Program(states)
return program.process(data['string'],1)
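# A minimal demonstration machine: starting in state 1, the head walks right over
# 'a's until it reads the end marker '#' and accepts. The dict layout mirrors what
# simulate() expects:
#
#     machine = {
#         "string": "aaa",
#         "states": [
#             {"action": "right", "stateNumber": 1,
#              "transitions": [{"character": "a", "stateNumber": 1},
#                              {"character": "#", "stateNumber": 2}]},
#             {"action": "accept", "stateNumber": 2, "transitions": []},
#         ],
#     }
#     simulate(machine)    # -> {"result": True, "log": [...]}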
``` |
{
"source": "joshmarshall/mogo",
"score": 3
} |
#### File: mogo/mogo/connection.py
```python
from urllib.parse import urlparse
from pymongo import MongoClient
from pymongo.collection import Collection
from pymongo.database import Database
from pymongo.errors import ConnectionFailure
from types import TracebackType
from typing import Any, Optional, Type
class Connection(object):
"""
This just caches a pymongo connection and adds
a few shortcuts.
"""
_instance = None # type: Optional['Connection']
_database = None # type: Optional[str]
connection = None # type: Optional[MongoClient]
@classmethod
def instance(cls) -> "Connection":
""" Retrieves the shared connection. """
if not cls._instance:
cls._instance = Connection()
return cls._instance
@classmethod
def connect(
cls, database: Optional[str] = None,
uri: str = "mongodb://localhost:27017",
**kwargs: Any) -> MongoClient:
"""
Wraps a pymongo connection.
TODO: Allow some of the URI stuff.
"""
if not database:
database = urlparse(uri).path
while database.startswith("/"):
database = database[1:]
if not database:
raise ValueError("A database name is required to connect.")
conn = cls.instance()
conn._database = database
conn.connection = MongoClient(uri, **kwargs)
return conn.connection
def get_database(self, database: Optional[str] = None) -> Database:
""" Retrieves a database from an existing connection. """
if not self.connection:
raise ConnectionFailure('No connection')
if not database:
if not self._database:
raise Exception('No database submitted')
database = self._database
return self.connection[database]
def get_collection(
self,
collection: str,
database: Optional[str] = None) -> Collection:
""" Retrieve a collection from an existing connection. """
return self.get_database(database=database)[collection]
class Session(object):
""" This class just wraps a connection instance """
connection = None # type: Optional[Connection]
database = None # type: Optional[str]
args = None # type: Any
kwargs = None # type: Any
def __init__(self, database: str, *args: Any, **kwargs: Any) -> None:
""" Stores a connection instance """
self.connection = None
self.database = database
self.args = args
self.kwargs = kwargs
def connect(self) -> None:
""" Connect to MongoDB """
connection = Connection()
connection._database = self.database
connection.connection = MongoClient(*self.args, **self.kwargs)
self.connection = connection
def disconnect(self) -> None:
# PyMongo removed the disconnect keyword, close() is now used.
self.close()
def close(self) -> None:
if self.connection is not None and \
self.connection.connection is not None:
self.connection.connection.close()
def __enter__(self) -> 'Session':
""" Open the connection """
self.connect()
return self
def __exit__(
self,
exc_type: Optional[Type[Exception]],
exc_value: Optional[Exception],
traceback: Optional[TracebackType]) -> None:
""" Close the connection """
self.disconnect()
def connect(*args: Any, **kwargs: Any) -> MongoClient:
"""
Initializes a connection and the database. It returns
the pymongo connection object so that end_request, etc.
can be called if necessary.
"""
return Connection.connect(*args, **kwargs)
def session(database: str, *args: Any, **kwargs: Any) -> Session:
"""
Returns a session object to be used with the `with` statement.
"""
return Session(database, *args, **kwargs)
__all__ = ["connect", "session"]
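# Usage sketch (assumes a MongoDB server reachable at the given URI; the database
# names are placeholders):
#
#     client = connect("my_database", uri="mongodb://localhost:27017")
#     ...                                  # models now share this connection
#     client.close()
#
#     with session("analytics_db", "mongodb://localhost:27017") as s:
#         events = s.connection.get_collection("events")
#         events.insert_one({"type": "login"})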
```
#### File: mogo/mogo/decorators.py
```python
from typing import Any, cast, Callable, Optional, Type, TypeVar, Union
T = TypeVar("T")
class notinstancemethod(object):
"""
Used to refuse access to a classmethod if called from an instance.
"""
def __init__(self, func: Union[Callable[..., T], classmethod]) -> None:
if type(func) is not classmethod:
raise ValueError("`notinstancemethod` called on non-classmethod")
self.func = func
def __get__(
self, obj: Any,
objtype: Optional[Type[Any]] = None) -> Callable[..., T]:
if obj is not None:
raise TypeError("Cannot call this method on an instance.")
return cast(classmethod, self.func).__get__(obj, objtype)
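# Usage sketch -- the decorator must wrap an actual classmethod object, so it is
# stacked on top of @classmethod:
#
#     class Widget(object):
#         @notinstancemethod
#         @classmethod
#         def drop_all(cls) -> None:
#             ...
#
#     Widget.drop_all()      # fine
#     Widget().drop_all()    # raises TypeError: cannot be called on an instance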
```
#### File: mogo/mogo/helpers.py
```python
from typing import Optional, TypeVar
T = TypeVar("T")
def check_none(value: Optional[T]) -> T:
if value is None:
raise ValueError("Value is unexpectedly None.")
return value
```
#### File: mogo/tests/test_usage.py
```python
from datetime import datetime
import unittest
import warnings
from bson.objectid import ObjectId
import mogo
from mogo import PolyModel, Model, Field, ReferenceField, DESC, connect
from mogo import ConstantField
from mogo.connection import Connection
from mogo.cursor import Cursor
from mogo.model import UnknownField
import pymongo
from pymongo.collation import Collation
from pymongo.errors import OperationFailure
from typing import Any, cast, Optional, Type, TypeVar
T = TypeVar("T")
DBNAME = "_mogotest"
ALTDB = "_mogotest2"
DELETE = True
class Foo(Model):
bar = Field(str)
typeless = Field[Any]()
dflt = Field(str, default="dflt")
callme = Field(str, default=lambda: "funtimes")
dtnow = Field(datetime, default=lambda: datetime.now())
def __unicode__(self) -> str:
return "FOOBAR"
Foo.ref = ReferenceField(Foo)
F = TypeVar("F", bound="FooWithNew")
class FooWithNew(Model):
bar = Field(str)
@classmethod
def new(cls: Type[F], **kwargs: Any) -> F:
return cls(bar="whatever")
class Company(Model):
name = Field[str](str)
@property
def people(self) -> Cursor["Person"]:
return Person.search(company=self)
class Person(Model):
_name = "people"
company = ReferenceField(Company)
name = Field[str](str)
email = Field[str](str)
class SubPerson(Person):
""" Testing inheritance """
another_field = Field[str](str)
class Car(PolyModel):
""" Base model for alternate inheritance """
doors = Field[int](int, default=4)
wheels = Field[int](int, default=4)
type = Field[str](str, default="car")
@classmethod
def get_child_key(cls) -> str:
return "type"
def drive(self) -> bool:
""" Example method to overwrite """
raise NotImplementedError("Implement this in child classes")
@Car.register("sportscar_value")
class SportsCar(Car):
""" Alternate car """
doors = Field[int](int, default=2)
type = Field[str](str, default="sportscar_value")
def drive(self) -> bool:
""" Overwritten """
return True
@Car.register
class Convertible(SportsCar):
""" New methods """
_top_down = False # type: bool
type = Field[str](str, default="convertible")
def toggle_roof(self) -> bool:
""" Opens / closes roof """
self._top_down = not self._top_down
return self._top_down
class TestMogoGeneralUsage(unittest.TestCase):
def setUp(self) -> None:
self._conn = connect(DBNAME)
def assert_not_none(self, obj: Optional[T]) -> T:
# this is just a custom version of assertIsNotNone that
# returns the object of the correct type if it's not null
if obj is None:
self.fail("Object unexpectedly none.")
return obj
def test_connect_populates_database(self) -> None:
self.assertRaises(ValueError, connect)
self.assertIsInstance(self._conn, pymongo.MongoClient)
connection = Connection.instance()
self.assertEqual(connection._database, DBNAME)
self._conn.close()
def test_uri_connect_populates_database_values(self) -> None:
conn = connect(uri="mongodb://localhost/{}".format(DBNAME))
self.assertIsInstance(conn, pymongo.MongoClient)
connection = Connection.instance()
self.assertEqual(connection._database, DBNAME)
conn.close()
# overriding the database name
conn = connect(DBNAME, uri="mongodb://localhost/foobar")
self.assertIsInstance(conn, pymongo.MongoClient)
connection = Connection.instance()
self.assertEqual(connection._database, DBNAME)
conn.close()
def test_model_construction_populates_field_data(self) -> None:
foo = Foo(bar="cheese")
self.assertEqual(foo.bar, "cheese")
self.assertEqual(foo.dflt, "dflt")
self.assertEqual(foo.callme, "funtimes")
self.assertIsInstance(foo.dtnow, datetime)
foo.bar = "model"
self.assertEqual(foo.bar, "model")
def test_model_create_saves_model_into_database(self) -> None:
foo = Foo.create(bar="cheese")
self.assertEqual(foo.bar, "cheese")
self.assertEqual(Foo.find().count(), 1)
# testing with a classmethod "new" defined.
foo2 = FooWithNew.create()
self.assertIsNotNone(foo2._id)
self.assertEqual(foo2.bar, "whatever")
def test_save_includes_default_fields_in_database(self) -> None:
foo = Foo(bar="goat")
id_ = foo.save(w=1)
raw_result = self.assert_not_none(
Foo._get_collection().find_one({"_id": id_}))
self.assertEqual(raw_result["dflt"], "dflt")
def test_create_stores_updates_id_for_model(self) -> None:
foo = Foo()
foo.bar = "create_delete"
idval = foo.save()
self.assertIs(type(idval), ObjectId)
self.assertEqual(foo.id, idval)
def test_search_or_create_inserts_and_updates_accordingly(self) -> None:
foo = Foo.search_or_create(bar="howdy")
self.assertIsInstance(foo._id, ObjectId)
foo.typeless = 4
foo.save()
baz = Foo.search_or_create(bar="howdy", typeless=2)
self.assertNotEqual(foo.id, baz.id)
self.assertEqual(baz.typeless, 2)
qux = Foo.search_or_create(bar="howdy", typeless=4)
self.assertEqual(foo.id, qux.id)
self.assertEqual(qux.typeless, 4)
def test_find_one_returns_first_matching_entry(self) -> None:
foo = Foo()
foo.bar = "find_one"
idval = foo.save()
foo2 = self.assert_not_none(Foo.find_one({"bar": "find_one"}))
self.assertEqual(foo2._get_id(), idval)
self.assertEqual(foo2, foo)
def test_find_one_returns_none_if_not_existing(self) -> None:
self.assertIsNone(Foo.find_one({}))
def test_find_one_raises_when_keyword_arguments_are_provided(self) -> None:
foo = Foo.new(bar="bad_find_one")
foo.save()
item = foo.find_one()
self.assertIsNotNone(item)
item = foo.find_one({})
self.assertIsNotNone(item)
with self.assertRaises(ValueError):
foo.find_one(bar="bad_find_one")
def test_remove_raises_when_keyword_arguments_are_provided(self) -> None:
foo = Foo.create(bar="testing")
foo.save()
with self.assertRaises(ValueError):
Foo.remove(bar="testing")
with self.assertRaises(ValueError):
Foo.remove()
self.assertEqual(Foo.count(), 1)
def test_class_remove_respects_multi_parameters(self) -> None:
class Mod(Model):
val = Field(int)
mod = Field(int)
for i in range(100):
foo = Mod(val=i, mod=i % 2)
foo.save()
matches = Mod.find({"mod": 1}).count()
Mod.remove({"mod": 1})
self.assertEqual(matches - 1, Mod.find({"mod": 1}).count())
Mod.remove({"mod": 1}, multi=True)
self.assertEqual(0, Mod.find({"mod": 1}).count())
def test_count_returns_total_number_of_stored_entries(self) -> None:
foo = Foo()
foo.bar = "count"
foo.save()
count = Foo.count()
self.assertEqual(count, 1)
def test_grab_returns_instance_by_id(self) -> None:
foo = Foo()
foo.bar = "grab"
idval = foo.save()
newfoo = self.assert_not_none(Foo.grab(str(idval)))
self.assertEqual(newfoo.id, idval)
self.assertEqual(newfoo._id, idval)
def test_find_returns_model_instances_from_iterator(self) -> None:
foo = Foo()
foo.bar = "find"
foo.save()
foo2 = Foo()
foo2.bar = "find"
foo2.save()
result = Foo.find({"bar": "find"})
self.assertEqual(result.count(), 2)
f = result[0] # should be first one
self.assertIs(type(f), Foo)
self.assertEqual(f.bar, "find")
for f in result:
self.assertIs(type(f), Foo)
def test_find_next_method_returns_constructed_models(self) -> None:
# this is mostly to verify Python 3 compatibility with the next()
foo = Foo.create(bar="find")
foo2 = Foo.create(bar="find")
result = Foo.find({"bar": "find"})
self.assertEqual(foo, result.next())
self.assertEqual(foo2, result.next())
with self.assertRaises(StopIteration):
result.next()
def test_find_len_returns_count_of_results_from_query(self) -> None:
foo = Foo(bar="find")
foo.save()
foo2 = Foo(bar="find")
foo2.save()
result = Foo.find({"bar": "find"})
self.assertEqual(result.count(), 2)
self.assertEqual(len(result), 2)
def test_find_raises_when_keyword_arguments_provided(self) -> None:
foo = Foo.new(bar="bad_find")
foo.save()
cursor = foo.find()
self.assertTrue(cursor.count())
cursor = foo.find({})
self.assertTrue(cursor.count())
with self.assertRaises(ValueError):
foo.find(bar="bad_find")
def test_cursor_supports_sort_passthrough(self) -> None:
Foo.create(bar="zzz")
Foo.create(bar="aaa")
Foo.create(bar="ggg")
results = [f.bar for f in Foo.find().sort("bar")]
self.assertEqual(["aaa", "ggg", "zzz"], results)
def test_cursor_supports_skip_and_limit_passthrough(self) -> None:
Foo.create(bar="aaa")
Foo.create(bar="ggg")
Foo.create(bar="zzz")
results = [f.bar for f in Foo.find().sort("bar").skip(1).limit(1)]
self.assertEqual(["ggg"], results)
def test_cursor_supports_close_passthrough(self) -> None:
for i in range(10):
Foo.create(bar="ggg")
cursor = Foo.find()
cursor.close()
with self.assertRaises(StopIteration):
cursor.next()
def test_cursor_supports_rewind_passthrough(self) -> None:
for i in range(10):
Foo.create(bar="ggg")
cursor = Foo.find()
results1 = list(cursor)
with self.assertRaises(StopIteration):
cursor.next()
cursor = cursor.rewind()
results2 = list(cursor)
self.assertEqual(results1, results2)
def test_cursor_supports_collation_passthrough(self) -> None:
for c in ["Z", "a", "B", "z", "A", "b"]:
Foo.create(bar=c)
cursor = Foo.find()
cursor = cursor.collation(Collation(locale="en_US"))
cursor.sort("bar")
results = [f.bar for f in cursor]
self.assertEqual(["a", "A", "b", "B", "z", "Z"], results)
def test_setattr_updates_field_values(self) -> None:
foo = Foo(bar="baz")
foo.save()
self.assertIsNotNone(Foo.grab(foo.id))
setattr(foo, "bar", "quz")
self.assertEqual(foo.bar, "quz")
self.assertEqual(getattr(foo, "bar"), "quz")
foo.save()
result = self.assert_not_none(Foo.grab(foo.id))
self.assertEqual(result.bar, "quz")
def test_save_updates_existing_entry(self) -> None:
foo = Foo()
foo.bar = "update"
foo.save()
result = self.assert_not_none(Foo.find_one({"bar": "update"}))
result["hidden"] = True
setattr(result, "bar", "new update")
result.save()
result2 = self.assert_not_none(Foo.find_one({"bar": "new update"}))
self.assertEqual(result.id, result2.id)
self.assertEqual(result, result2)
self.assertTrue(result["hidden"])
self.assertTrue(result2["hidden"])
self.assertEqual(result2.bar, "new update")
self.assertEqual(result.bar, "new update")
def test_new_fields_added_to_model_with_global_auto_create(self) -> None:
try:
mogo.AUTO_CREATE_FIELDS = True
class Flexible(Model):
pass
instance = Flexible(foo="bar", age=5)
instance.save()
self.assertEqual(instance["foo"], "bar")
self.assertEqual(instance.foo, "bar") # type: ignore
self.assertEqual(instance["age"], 5)
self.assertEqual(instance.age, 5) # type: ignore
retrieved = self.assert_not_none(Flexible.find_one())
self.assertEqual(retrieved, instance)
# Test that the flexible fields were set
self.assertEqual(instance.foo, "bar") # type: ignore
self.assertEqual(instance.age, 5) # type: ignore
finally:
mogo.AUTO_CREATE_FIELDS = False
def test_new_fields_added_with_auto_create_on_model(self) -> None:
""" Overwrite on a per-model basis """
class Flexible(Model):
AUTO_CREATE_FIELDS = True
instance = Flexible.create(foo="bar", age=5)
self.assertEqual("bar", instance.foo) # type: ignore
self.assertEqual(5, instance.age) # type: ignore
def test_model_auto_create_setting_overrules_global_config(self) -> None:
try:
mogo.AUTO_CREATE_FIELDS = True
class Flexible(Model):
AUTO_CREATE_FIELDS = False
with self.assertRaises(UnknownField):
Flexible.create(foo="bar", age=5)
finally:
mogo.AUTO_CREATE_FIELDS = False
def test_class_update_affects_all_matching_documents(self) -> None:
class Mod(Model):
val = Field(int)
mod = Field(int)
for i in range(100):
foo = Mod(val=i, mod=i % 2)
foo.save()
Mod.update({"mod": 1}, {"$set": {"mod": 0}})
self.assertEqual(Mod.search(mod=0).count(), 51)
Mod.update(
{"mod": 1}, {"$set": {"mod": 0}}, multi=True)
self.assertEqual(Mod.search(mod=0).count(), 100)
def test_instance_update_only_affects_single_instance(self) -> None:
class Mod(Model):
val = Field(int)
mod = Field(int)
for i in range(100):
foo = Mod(val=i, mod=i % 2)
foo.save()
foo = self.assert_not_none(Mod.find_one({"mod": 1}))
with self.assertRaises(TypeError):
foo.update(mod="testing")
foo.update(mod=5)
self.assertEqual(foo.mod, 5)
foo2 = self.assert_not_none(Mod.grab(foo.id))
self.assertEqual(foo2.mod, 5)
self.assertEqual(Mod.search(mod=5).count(), 1)
def test_cursor_update_affects_all_matching_documents(self) -> None:
class Atomic(Model):
value = Field(int)
key = Field(str, default="foo")
unchanged = Field(default="original")
for i in range(10):
atomic = Atomic(value=i)
if i % 2:
atomic.key = "bar"
atomic.save()
Atomic.find({"key": "bar"}).update({"$inc": {"value": 100}})
Atomic.find({"key": "foo"}).change(key="wut")
self.assertEqual(5, Atomic.find({"key": "wut"}).count())
self.assertEqual(5, Atomic.find({"value": {"$gt": 100}}).count())
self.assertEqual(10, Atomic.find({"unchanged": "original"}).count())
def test_reference_field_stores_dbref_and_returns_model(self) -> None:
foo = Foo()
foo.bar = "ref"
foo.save()
new = self.assert_not_none(Foo.find_one({"bar": "ref"}))
new.ref = foo # type: ignore
new.save()
result2 = self.assert_not_none(Foo.find_one({"bar": "ref"}))
self.assertEqual(result2.ref, foo) # type: ignore
def test_search_accepts_keywords(self) -> None:
nothing = Foo.search(bar="whatever").first()
self.assertEqual(nothing, None)
foo = Foo()
foo.bar = "search"
foo.save()
result = foo.search(bar="search")
self.assertEqual(result.count(), 1)
self.assertEqual(result.first(), foo)
def test_search_populates_fields_to_verify_keywords(self) -> None:
""" Testing the bug where fields are not populated before search. """
class Bar(Model):
field = Field[Any]()
insert_result = self._conn[DBNAME]["bar"].insert_one({"field": "test"})
result_id = insert_result.inserted_id
result = self.assert_not_none(Bar.search(field="test").first())
self.assertEqual(result.id, result_id)
def test_remove_access_on_instance_raises_error(self) -> None:
foo = Foo()
foo.bar = "bad_remove"
foo.save()
with self.assertRaises(TypeError):
getattr(foo, "remove")
def test_drop_access_on_instance_raises_error(self) -> None:
foo = Foo()
foo.bar = "bad_drop"
foo.save()
with self.assertRaises(TypeError):
getattr(foo, "drop")
def test_search_accepts_model_instance_for_reference_field(self) -> None:
company = Company(name="Foo, Inc.")
company.save()
user = Person(name="Test", email="<EMAIL>")
user.company = company
user.save()
self.assertEqual(company.people.count(), 1)
def test_group_passes_args_to_cursor_and_is_deprecated(self) -> None:
db = self._conn[DBNAME]
for i in range(100):
obj = {"alt": i % 2, "count": i}
db["counter"].insert_one(obj)
class Counter(Model):
pass
with warnings.catch_warnings():
warnings.simplefilter("error")
with self.assertRaises((DeprecationWarning, OperationFailure)):
result = Counter.group(
key={"alt": 1},
condition={"alt": 0},
reduce="function (obj, prev) { prev.count += obj.count; }",
initial={"count": 0})
self.assertEqual(result[0]["count"], 2450) # type: ignore
def test_order_on_cursor_accepts_field_keywords(self) -> None:
class OrderTest(Model):
up = Field(int)
down = Field(int)
mod = Field(int)
for i in range(100):
obj = OrderTest(up=i, down=99 - i, mod=i % 10)
obj.save()
results = []
query1 = OrderTest.search().order(up=DESC)
query2 = OrderTest.search().order(mod=DESC).order(up=DESC)
for obj in query1:
results.append(obj.up)
if len(results) == 5:
break
self.assertEqual(results, [99, 98, 97, 96, 95])
mod_result = self.assert_not_none(query2.first())
self.assertEqual(mod_result.mod, 9)
self.assertEqual(mod_result.up, 99)
def test_subclasses_store_in_parent_database(self) -> None:
""" Test simple custom model inheritance """
person = Person(name="Testing")
subperson = SubPerson(name="Testing", another_field="foobar")
person.save()
subperson.save()
self.assertEqual(Person.find().count(), 2)
# Doesn"t automatically return instances of proper type yet
self.assertEqual(Person.find()[0].name, "Testing")
self.assertEqual(Person.find()[1]["another_field"], "foobar")
def test_poly_models_construct_from_proper_class(self) -> None:
""" Test the mogo support for model inheritance """
self.assertEqual(Car._get_name(), SportsCar._get_name())
self.assertEqual(Car._get_collection(), SportsCar._get_collection())
car = Car()
with self.assertRaises(NotImplementedError):
car.drive()
self.assertEqual(car.doors, 4)
self.assertEqual(car.wheels, 4)
self.assertEqual(car.type, "car")
car.save()
self.assertEqual(Car.find().count(), 1)
car2 = self.assert_not_none(Car.find().first())
self.assertEqual(car, car2)
self.assertEqual(car.copy(), car2.copy())
self.assertIsInstance(car2, Car)
sportscar = SportsCar()
sportscar.save()
self.assertTrue(sportscar.drive())
self.assertEqual(sportscar.doors, 2)
self.assertEqual(sportscar.wheels, 4)
self.assertEqual(sportscar.type, "sportscar_value")
self.assertEqual(SportsCar.find().count(), 1)
sportscar2 = self.assert_not_none(SportsCar.find().first())
self.assertEqual(sportscar2.doors, 2)
self.assertEqual(sportscar2.type, "sportscar_value")
self.assertEqual(Car.find().count(), 2)
sportscar3 = self.assert_not_none(Car.find({"doors": 2}).first())
self.assertIsInstance(sportscar3, SportsCar)
self.assertTrue(sportscar3.drive())
convertible = cast(Convertible, Car(type="convertible"))
convertible.save()
self.assertEqual(convertible.doors, 2)
self.assertTrue(convertible.toggle_roof())
self.assertFalse(convertible.toggle_roof())
all_cars = list(Car.find())
self.assertEqual(len(all_cars), 3)
self.assertIsInstance(all_cars[0], Car)
self.assertIsInstance(all_cars[1], SportsCar)
self.assertIsInstance(all_cars[2], Convertible)
self.assertEqual(SportsCar.search().count(), 1)
self.assertEqual(Convertible.find_one(), convertible)
self.assertEqual(SportsCar.first(), Car.first(type="sportscar_value"))
def test_all_string_representation_methods_call__unicode__(self) -> None:
""" Test __repr__, __str__, and __unicode__ """
repr_result = repr(Foo())
str_result = Foo().__str__()
str_fn_result = str(Foo())
unicode_fn_result = Foo().__unicode__()
hypo = "FOOBAR"
self.assertTrue(
unicode_fn_result == repr_result == str_result ==
str_fn_result == hypo)
def test_model_use_supports_alternate_sessions(self) -> None:
""" Test using a session on a model """
foo = Foo()
foo.save()
self.assertEqual(Foo.find().count(), 1)
session = mogo.session(ALTDB)
session.connect()
FooWrapped = Foo.use(session)
self.assertEqual(FooWrapped._get_name(), Foo._get_name())
self.assertEqual(FooWrapped.find().count(), 0)
coll = cast(Connection, session.connection).get_collection("foo")
self.assertEqual(coll.count_documents({}), 0)
foo2 = FooWrapped()
foo2.save()
self.assertEqual(coll.count_documents({}), 1)
session.close()
def test_session_context_returns_session_instance(self) -> None:
""" Test the with statement alternate connection """
with mogo.session(ALTDB) as s:
foo = Foo.use(s)(bar="testing_with_statement")
foo.save()
results = Foo.use(s).find({"bar": "testing_with_statement"})
self.assertEqual(results.count(), 1)
result = results.first()
self.assertEqual(result, foo)
count = Foo.find().count()
self.assertEqual(count, 0)
def test_constant_field_allows_setting_before_saving(self) -> None:
class ConstantModel(Model):
name = Field(str, required=True)
constant = ConstantField(int, required=True)
model = ConstantModel(name="whatever", constant=10)
self.assertEqual(10, model.constant)
model.constant = 5
model.save()
self.assertEqual(5, model.constant)
def test_constant_field_allows_setting_to_same_value(self) -> None:
class ConstantModel(Model):
name = Field(str, required=True)
constant = ConstantField(int, required=True)
model = ConstantModel.create(name="whatever", constant=5)
model.constant = 5
self.assertEqual(5, model.constant)
def test_constant_field_cannot_be_changed_after_save(self) -> None:
class ConstantModel(Model):
name = Field(str, required=True)
constant = ConstantField(int, required=True)
model = ConstantModel.create(name="whatever", constant=5)
with self.assertRaises(ValueError):
model.constant = 10
self.assertEqual(5, model.constant)
def test_custom_callbacks_override_default_behavior(self) -> None:
class CustomField(Field[int]):
def _get_callback(self, instance: Model, value: Any) -> int:
return 5
def _set_callback(self, instance: Model, value: Any) -> int:
return 8
def custom_get(instance: Model, value: Any) -> int:
return 1
def custom_set(instance: Model, value: Any) -> int:
return 2
class CustomModel(Model):
custom1 = Field(get_callback=custom_get, set_callback=custom_set)
custom2 = CustomField()
custom3 = CustomField(
get_callback=custom_get, set_callback=custom_set)
custom_model = CustomModel()
self.assertEqual(1, custom_model.custom1)
custom_model.custom1 = 15
self.assertEqual(2, custom_model["custom1"])
self.assertEqual(5, custom_model.custom2)
custom_model.custom2 = 15
self.assertEqual(8, custom_model["custom2"])
self.assertEqual(1, custom_model.custom3)
custom_model.custom3 = 15
self.assertEqual(2, custom_model["custom3"])
def test_delete_field_by_key(self) -> None:
foo = Foo.create(bar="value")
result = Foo.first()
if result is None:
self.fail("Did not save Foo entry.")
return
self.assertEqual("value", foo["bar"])
del result["bar"]
result.save()
result = Foo.first()
if result is None:
self.fail("Did not retain Foo entry.")
return
self.assertNotIn("bar", result)
def test_first_returns_first_matching_instance(self) -> None:
foo = Foo()
foo.bar = "search"
foo.save()
for x in range(3):
foo_x = Foo()
foo_x.bar = "search"
foo_x.save()
result = foo.first(bar="search")
self.assertEqual(result, foo)
def tearDown(self) -> None:
if DELETE:
self._conn.drop_database(DBNAME)
self._conn.drop_database(ALTDB)
self._conn.close()
``` |
{
"source": "joshmarshall/testnado",
"score": 2
} |
#### File: testnado/credentials/header_credentials.py
```python
class HeaderCredentials(object):
def __init__(self, headers):
self._headers = headers
def __call__(self, fetch_arguments):
for header_key, header_value in self._headers.items():
fetch_arguments.headers.setdefault(header_key, header_value)
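# Usage sketch (the token is a placeholder). An instance is a callable that fills in
# default headers on a fetch-arguments object, e.g. returned from a test case's
# get_credentials() hook:
#
#     credentials = HeaderCredentials({"Authorization": "Bearer not-a-real-token"})
#     credentials(fetch_arguments)   # only sets headers that are not already present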
```
#### File: testnado/testnado/handler_test_case.py
```python
from testnado import AuthenticatedFetchCase
from tornado.testing import AsyncHTTPTestCase
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
class HandlerTestCase(AuthenticatedFetchCase, AsyncHTTPTestCase):
def assert_redirected_path_equals(self, expected_path, response):
if "Location" not in response.headers:
self.fail("Response does not have a 'Location' header.")
location = response.headers["Location"]
path = urlparse.urlparse(location).path
self.assertEqual(expected_path, path)
```
#### File: testnado/tests/test_handler_test_case.py
```python
from testnado import HandlerTestCase
from tests.helpers import TestCaseTestCase
from tornado.web import Application, RequestHandler
class TestHandlerTestCase(TestCaseTestCase):
def test_handler_test_case_get_app(self):
class TestHandlerTestCaseNoApp(HandlerTestCase):
def test_thing(self):
self.fail(
"Tornado AsyncHTTPTestCase should fail before this "
" because get_app() has not been implemented.")
with self.assertRaises(NotImplementedError):
self.execute_case(TestHandlerTestCaseNoApp)
def test_handler_test_case_get_credentials(self):
class TestHandlerTestCaseNoCredentials(HandlerTestCase):
def get_app(self):
return Application()
def test_auth(self):
with self.assertRaises(NotImplementedError):
self.authenticated_fetch("/secret")
self.execute_case(TestHandlerTestCaseNoCredentials)
def test_handler_assert_redirected_path_equals(self):
class Handler(RequestHandler):
def get(self):
self.redirect("http://google.com/location")
class TestHandlerAssertRedirect(HandlerTestCase):
def get_app(self):
return Application([("/redirect", Handler)])
def test_redirect(self):
response = self.fetch("/redirect")
self.assert_redirected_path_equals("/location", response)
self.execute_case(TestHandlerAssertRedirect)
def test_handler_assert_redirected_path_mismatch_query_raises(self):
class Handler(RequestHandler):
def get(self):
self.redirect("http://google.com/foobar")
class TestHandlerAssertRedirect(HandlerTestCase):
def get_app(self):
return Application([("/redirect", Handler)])
def test_redirect(self):
response = self.fetch("/redirect")
self.assert_redirected_path_equals("/location", response)
with self.assertRaises(AssertionError):
self.execute_case(TestHandlerAssertRedirect)
``` |
{
"source": "josh-marsh/thursday",
"score": 3
} |
#### File: thursday/thursday/models.py
```python
import math
import pickle
import keras
from keras import backend as K
from keras.callbacks import EarlyStopping
from keras.callbacks import History
from keras.callbacks import ReduceLROnPlateau
from keras.engine.topology import Layer
from keras.layers import AveragePooling2D
from keras.layers import BatchNormalization
from keras.layers import Concatenate
from keras.layers import Conv2D
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers import Input
from keras.layers import Lambda
from keras.layers import MaxPooling2D
from keras.layers import multiply
from keras.models import load_model
from keras.models import Model
from keras.models import model_from_json
import matplotlib.pyplot as plt
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils import shuffle
import tensorflow as tf
K.set_image_dim_ordering('tf')
K.set_image_data_format('channels_last')
class SklearnModel:
"""Wrapper for sklearn classifiers.
This creates a wrapper that can be instantiated with any sklearn
classifier that takes input data with shape (samples, features)
and label data with shape (samples,). Using its fit method you
can generate a trained model, and you can load the same trained
model with the load method at any point in the script.
"""
def __init__(self, Model, datagen=None, nb_augment=None, seed=0, **kwargs):
"""Attributes:
Model: Sklearn classifier.
datagen: The output of the data_gen function to apply random
augmentations to our data in real time
(a keras.preprocessing.image.ImageDataGenerator object)
nb_augment: Int. Factor by which the number of samples is
increased by random augmentations.
seed: Seed value to consistently initialize the random number
generator.
**kwargs: Keyword arguments passed to sklearn.
"""
self.Model = Model
self.datagen = datagen
self.nb_augment = nb_augment
self.seed = seed
self.kwargs = kwargs
self.model = None
self.path = None
self.name = Model.__name__
def fit(self, train_x, train_y):
"""Trains sklearn model.
# Arguments:
train_x: Array of images with shape [samples, dim, dim, 1].
train_y: Array of labels with shape [samples ,].
# Returns:
self
"""
# Shuffling
train_x, train_y = shuffle(train_x, train_y, random_state=self.seed)
# Augmenting data
if self.datagen is not None:
train_x, train_y = self.augment(train_x, train_y, batch_size=
train_x.shape[0], nb_augment=self.nb_augment)
# Shuffling
train_x, train_y = shuffle(train_x, train_y, random_state=self.seed)
# Flattening images
train_x = np.reshape(train_x, (np.shape(train_x)[0], -1))
try:
model = self.Model(random_state=self.seed, class_weight='balanced',
**self.kwargs)
except TypeError:
try:
model = self.Model(class_weight='balanced', **self.kwargs)
except TypeError:
model = self.Model(**self.kwargs)
model = model.fit(train_x, train_y)
self.model = model
return self
def predict_proba(self, test_x):
""" Probability estimates for samples.
# Arguments:
test_x: Array of images with shape [samples, dim, dim, 1].
# Returns:
predictions: Probability estimates for test_x.
"""
# Flattening images
test_x = np.reshape(test_x, (np.shape(test_x)[0], -1))
predictions = self.model.predict_proba(test_x)
return predictions
def predict(self, test_x):
"""Predicting class labels for samples in test_x.
# Arguments:
test_x: Array of images with shape [samples, dim, dim, 1].
# Returns:
predictions: Class predictions for test_x.
"""
predictions = self.predict_proba(test_x)
predictions = np.around(predictions)
return predictions
def save(self, path=None):
"""Saves model as pickle file.
# Arguments:
path: File path to save the model. Must be a .pk file.
# Returns:
self
"""
self.path = path
with open(path, 'wb') as f:
pickle.dump(self.model, f)
return self
def load(self, path=None):
"""Loads trained Sklearn Model from disk.
# Arguments:
path: File path to load the saved model from.
# Returns:
self
"""
if path is None:
if self.path is not None:
path = self.path
else:
raise ValueError("No model path was given and no saved path is available")
with open(path, 'rb') as f:
model = pickle.load(f)
self.model = model
return self
def augment(self, images, labels, datagen=None, batch_size=32, nb_augment=None):
"""Augments data for Sklearn models.
Using a base set of images, a random combination of augmentations within
a defined range is applied to generate a new batch of data.
# Arguments
images: Array of images with shape (samples, dim, dim, 1)
labels: Array of labels
datagen: The data generator output by the data_gen function.
batch_size: Number of samples per batch.
nb_augment: Factor by which the data is increased via augmentation
seed: Seed value to consistently initialize the random number
generator.
# Returns
Array of augmented images and their corresponding labels.
"""
if nb_augment is None:
nb_augment = self.nb_augment
if datagen is None:
datagen = self.datagen
# Number of images
samples = np.shape(images)[0]
# the .flow() command below generates batches of randomly transformed images
gen = datagen.flow(images, labels, batch_size=batch_size, shuffle=True, seed=self.seed)
# Generate empty data arrays
pro_images = np.zeros((images.shape[0] * nb_augment, images.shape[1],
images.shape[2], 1))
pro_labels = np.zeros((labels.shape[0] * nb_augment))
for step in range(1, nb_augment+1):
batch = 1
b = batch_size
b_start = samples * (step-1)
for X_batch, Y_batch in gen:
if batch < (samples / b):
cut_start = b_start + b * (batch-1)
cut_stop = b_start + batch * b
pro_images[cut_start:cut_stop, :, :, :] = X_batch
pro_labels[cut_start:cut_stop] = Y_batch
elif batch == samples // b:
break
else:
cut_start = b_start + b * (batch-1)
cut_stop = b_start + b * (batch-1) + X_batch.shape[0] % b
pro_images[cut_start:cut_stop, :, :, :] = X_batch
pro_labels[cut_start:cut_stop] = Y_batch
break
batch += 1
return pro_images, pro_labels
class HOGNet:
"""Wrapper for our hognet keras model.
This creates a class that acts as a wrapper around our custom
keras model, aka hognet. The fit method trains the model on our
data generator with our specified training parameters.
Using the load method, we can load the fully trained model from the
disk at any point in the script. This method can be useful when using
notebooks, as training time can be significant.
"""
def __init__(self, datagen=None, batch_size=32, steps_per_epoch=50,
max_epoch=100, patience=5, gap=2, seed=None):
"""Attributes:
name: name of the file at which the model is saved.
No file extension.
datagen: The output of the data_gen function to apply random
augmentations to our data in real time
(a keras.preprocessing.image.ImageDataGenerator object)
batch_size: number of images per batch (i.e number of images
generated per batch)
steps_per_epoch: number of batches per epoch (i.e. number of
batches generated by datagen per epoch)
max_epoch: maximum number of epochs to train the model for. The model
should stop training automatically when the loss stops
decreasing.
patience: number of epochs with no improvement after which
training will be stopped.
gap: Number of layers that have their weights unfrozen per
training cycle.
seed: Seed value to consistently initialize the random number
generator.
"""
self.datagen = datagen
self.batch_size = batch_size
self.steps_per_epoch = steps_per_epoch
self.max_epoch = max_epoch
self.patience = patience
self.gap = gap
self.seed = seed
self.model = None
self.history = None
self.class_weight = None
self.prewitt_x = None
self.prewitt_y = None
self.cent = None
self.name = "HOGNet"
# Setting random number generator seeds for numpy and tensorflow
np.random.seed(self.seed)
tf.set_random_seed(self.seed)
# Model HyperParameters
# The following values are based on both past research and much testing
bins = 8 # number of bins in histogram
cell_dim = 8 # height and width of the cells
block_dim = 2 # if changed, must add more block layers. Don't attempt.
bs = bin_stride_length = 1
# Number of cells along each dim
cell_nb = 256 // cell_dim
assert not 256 % cell_dim
# Defining Values
w = 2*np.pi/bins # width of each bin
centers = np.arange(-np.pi, np.pi, w) + 0.5 * w # centers of each bin
# Weights for the x and y convolutions to calculate image gradients
prewitt_x = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
prewitt_y = np.array([[-1, -1, -1], [0, 0, 0], [1, 1, 1]])
# Reshaping Prewitt operators to required shape
self.prewitt_x = prewitt_x.reshape((1, 3, 3, 1, 1)).astype('float64')
self.prewitt_y = prewitt_y.reshape((1, 3, 3, 1, 1)).astype('float64')
# Adding tiny gaussian noise
self.prewitt_x += 0.01 * np.random.randn(1, 3, 3, 1, 1)
self.prewitt_y += 0.01 * np.random.randn(1, 3, 3, 1, 1)
# Generating weights for histogram construction
self.cent = np.vstack((np.sin(centers), np.cos(centers)))
self.cent = self.cent.reshape((1, 1, 1, 2, bins))
# Generating Filters for the block Operations
def create_block_filters(block_dim):
filters = np.zeros((block_dim ** 2, block_dim, block_dim))
count = 0
for i in range(block_dim):
for j in range(block_dim):
filters[count, i, j] = 1
count += 1
return filters
# block_dim must be 2
# Increasing this will require adding more tf.nn.depthwise_conv2d functions.
# There is a depthwise_conv2d for each element in a single filter
# there are block_dim^2 elements in a filter
block_filters = create_block_filters(block_dim)
# Reshaping to satisfy required shape for weight array
# copying each filter along the last axis
# Must have shape [filter_height, filter_width, in_channels, channel_multiplier]
# (see Tensorflow docs for tf.nn.depthwise_conv2d)
b_shp = block_filters.shape
block_filters = block_filters.reshape((b_shp[0], b_shp[1], b_shp[2], 1, 1))
block_filters = block_filters.repeat(bins, axis=3)
# Converting filters to tensors
filt1 = tf.convert_to_tensor(block_filters[0, :, :, :, :])
filt2 = tf.convert_to_tensor(block_filters[1, :, :, :, :])
filt3 = tf.convert_to_tensor(block_filters[2, :, :, :, :])
filt4 = tf.convert_to_tensor(block_filters[3, :, :, :, :])
filt1 = tf.cast(filt1, dtype=tf.float32)
filt2 = tf.cast(filt2, dtype=tf.float32)
filt3 = tf.cast(filt3, dtype=tf.float32)
filt4 = tf.cast(filt4, dtype=tf.float32)
def calculate_magnitudes(conv_stacked):
mags = tf.norm((conv_stacked), axis=3)
return mags
def calculate_angles(conv_stacked):
angles = tf.atan2(conv_stacked[:, :, :, 1], conv_stacked[:, :, :, 0])
return angles
def calculate_sin_cos(angles):
sin = K.sin(angles)
cos = K.cos(angles)
sin_cos = K.stack([sin, cos], axis =-1)
return sin_cos
def block1(cells):
c_blocks = tf.nn.depthwise_conv2d(cells, filt1, strides=(1, bs, bs, 1),
padding="SAME")
return c_blocks
def block2(cells):
c_blocks = tf.nn.depthwise_conv2d(cells, filt2, strides=(1, bs, bs, 1),
padding="SAME")
return c_blocks
def block3(cells):
c_blocks = tf.nn.depthwise_conv2d(cells, filt3, strides=(1, bs, bs, 1),
padding="SAME")
return c_blocks
def block4(cells):
c_blocks = tf.nn.depthwise_conv2d(cells, filt4, strides=(1, bs, bs, 1),
padding="SAME")
return c_blocks
def block_norm_function(block_layer):
c = 0.00000001
divisor = tf.expand_dims(tf.sqrt(tf.norm(block_layer, axis=-1) + c), -1)
block_norm = tf.div(block_layer, divisor)
return block_norm
def hog_norm_function(bins_layer):
c = 0.00000001
divisor = tf.expand_dims(tf.sqrt(tf.norm(bins_layer, axis=-1) + c), -1)
hog_norms = tf.div(bins_layer, divisor)
divisor = tf.expand_dims(tf.sqrt(tf.norm(hog_norms, axis=-1) + c), -1)
hog_norms = tf.div(hog_norms, divisor)
return hog_norms
# Building Model
inputs = Input(shape=(256, 256, 1), name="input")
# Convolutions
x_conv = Conv2D(1, (3,3), strides=(1, 1), padding="same",
data_format="channels_last", trainable=True, use_bias=False,
name="conv_x")(inputs)
y_conv = Conv2D(1, (3,3), strides=(1, 1), padding="same",
data_format="channels_last", trainable=True, use_bias=False,
name="conv_y")(inputs)
# Stacking the convolutions, since you cannot pass multiple layers to a Lambda (needed for mags and angles)
conv_stacked = Concatenate(axis=-1, name="Conv_Stacked")([x_conv, y_conv])
# Calculating the gradient magnitudes and angles
mags = Lambda(calculate_magnitudes, output_shape=(256, 256),
name="mags1")(conv_stacked)
mags = Lambda(lambda x: K.stack((mags, mags), axis=-1),
name="mags2")(mags) # To enable sin_cos_vec
angles = Lambda(calculate_angles, output_shape=(256, 256),
name="angles")(conv_stacked)
# Calculating the components of angles in the x and y direction
# Then multiplying by magnitudes, giving angle vectors
sin_cos = Lambda(calculate_sin_cos, output_shape=(256, 256, 2),
name="sin_cos")(angles)
sin_cos_vec = multiply([sin_cos, mags], name="sin_cos_mag")
# Applying each filter (a single bin unit vector) to each angle vector.
# Result is an array with shape (img_height, img_width, bins)
# where each bin contains an angle vectors that contribution to each bin
# Relu activation function to remove negative projections.
votes = Conv2D(8, kernel_size=(1, 1), strides=(1, 1), activation="relu",
trainable=True, use_bias=False, name="votes")(sin_cos_vec)
# A round about way of splitting the image (i.e vote array)
# into a bunch of non-overlapping cells of size (cell_dim, cell_dim)
# Concatenating values at each bin level, giving shape (cell_nb, cell_nb, bins)
# Result is an array of cells with histograms along the final axis
cells = AveragePooling2D(pool_size=cell_dim, strides=cell_dim, name="cells")(votes)
cells = Lambda(lambda x: x * (cell_dim ** 2), name="cells2")(cells)
# Bin Operations
# Assuming that block shape = (2, 2)
# Ad hoc way of grouping the cells into overlapping blocks of 2 * 2 cells each.
# Two horizontally or vertically consecutive blocks overlap by two cells,
# i.e block strides.
# As a consequence, each internal cell is covered by four blocks (if bin_dim=2).
block1_layer = Lambda(block1, trainable=True, name="block_opp_1")(cells)
block2_layer = Lambda(block2, trainable=True, name="block_opp_2")(cells)
block3_layer = Lambda(block3, trainable=True, name="block_opp_3")(cells)
block4_layer = Lambda(block4, trainable=True, name="block_opp_4")(cells)
block_layer = Concatenate(axis=-1)([block1_layer, block2_layer,
block3_layer, block4_layer])
# normalize each block feature by its Euclidean norm
block_norm = Lambda(block_norm_function, name="norm1")(block_layer)
hog_norm = Lambda(hog_norm_function, name="norm2")(block_norm)
# Block 1
x = Conv2D(4, (3, 3), activation='relu', padding='same', name='block1_1')(hog_norm)
x = Conv2D(4, (3, 3), activation='relu', padding='same', name='block1_2')(x)
x = Conv2D(4, (3, 3), activation='relu', padding='same', name='block1_3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = Conv2D(8, (3, 3), activation='relu', padding='same', name='block2_1')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same', name='block2_2')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same', name='block2_3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Dense
x = Flatten(name='flat')(x)
x = Dense(50, activation='relu', name='fc1')(x)
x = Dropout(0.2)(x)
x = Dense(50, activation='relu', name='fc2')(x)
x = Dropout(0.2)(x)
x = Dense(50, activation='relu', name='fc3')(x)
x = Dropout(0.2)(x)
logistic_reg = Dense(1, activation = "sigmoid", trainable=True, name="lr")(x)
# Building Model
model = Model(inputs=inputs, outputs=logistic_reg)
self.model = model
def fit(self, train_x, train_y, val_x=None, val_y=None):
"""Fits HOGNet parameters to training data.
Using the set hyperparameters, our custom initial weights are
generated, our custom keras layers are defined in tensorflow, and
our model is constructed in keras. The resulting model is trained
either with or without validation data. The data is generated in
real time using the predefined datagen function. The learning rate
is reduced when the loss plateaus. When the model stops improving
for a set number of epochs (patience), training stops, and a
model is returned.
# Arguments
train_x: Images to train on with shape (samples, dim, dim, 1)
train_y: Image labels to train on as a confusion matrix with
shape (samples, 2)
val_x: Images to validate with. By default, no validation data
is used
val_y: Labels to validate with. By default, no validation data
is used.
# Returns
An updated state where self.model is a trained keras model.
"""
# Setting random number generator seeds for numpy and tensorflow
np.random.seed(self.seed)
tf.set_random_seed(self.seed)
# Setting weights
self.model.layers[1].set_weights(self.prewitt_x)
self.model.layers[2].set_weights(self.prewitt_y)
self.model.layers[9].set_weights(self.cent)
# Checking for validation data
if val_x is None and val_y is None:
validation = False
else:
validation = True
if self.datagen is None:
print ("no data generator has been inputed")
raise
# Shuffling
train_x, train_y = shuffle(train_x, train_y, random_state=self.seed)
# Setting Class weights
self.class_weight = compute_class_weight(
'balanced', np.unique(train_y), train_y)
# Constructing class for callbacks
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.loss = []
self.acc = []
if validation is True:
self.val_loss = []
self.val_acc = []
def on_batch_end(self, batch, logs={}):
self.loss.append(logs.get('loss'))
self.acc.append(logs.get('acc'))
if validation is True:
self.val_loss.append(logs.get('val_loss'))
self.val_acc.append(logs.get('val_acc'))
# Freezing layers 20 to 27 (the Conv2D blocks)
start = 20
stop = 28
for i, layer in enumerate(self.model.layers):
if start <= i < stop:
layer.trainable = False
# Compiling Model
self.model.compile(optimizer='adam', loss='binary_crossentropy',
metrics=["accuracy"])
for layer_block in range(start, stop, self.gap):
if validation is True:
callback = [EarlyStopping(monitor='val_loss', patience=self.patience),
ReduceLROnPlateau(monitor='val_loss', patience=5, verbose=1),
LossHistory()]
self.model.fit_generator(self.datagen.flow(train_x, train_y,
batch_size=self.batch_size, shuffle=True),
steps_per_epoch=self.steps_per_epoch, epochs=self.max_epoch,
validation_data=self.datagen.flow(val_x, val_y,
batch_size=self.batch_size, shuffle=True),
validation_steps=math.ceil(self.steps_per_epoch / 5),
callbacks=callback,
class_weight=self.class_weight)
else:
callback = [EarlyStopping(monitor='loss', patience=self.patience),
ReduceLROnPlateau(monitor='loss', patience=5, verbose=1),
LossHistory()]
self.model.fit_generator(self.datagen.flow(train_x, train_y,
batch_size=self.batch_size, shuffle=True),
steps_per_epoch=self.steps_per_epoch, epochs=self.max_epoch,
callbacks=callback,
class_weight=self.class_weight)
if self.history is None:
self.history = callback[2]
else:
self.history.loss.extend(callback[2].loss)
self.history.acc.extend(callback[2].acc)
if validation is True:
self.history.val_loss.extend(callback[2].val_loss)
self.history.val_acc.extend(callback[2].val_acc)
for i, layer in enumerate(self.model.layers):
if layer_block <= i < (layer_block + self.gap):
layer.trainable = True
return self
def predict_proba(self, test_x):
""" Probability estimates for samples.
# Arguments:
test_x: Array of images with shape [samples, dim, dim, 1].
# Returns:
predictions: Probability estimates for test_x as a confusion
matrix. Shape of [samples, 2].
"""
predictions = self.model.predict(test_x, batch_size=self.batch_size)
return predictions
def predict(self, test_x):
"""Predicting class labels for samples in test_x.
# Arguments:
test_x: Array of images with shape [samples, dim, dim, 1].
# Returns:
predictions: Class predictions for test_x. Shape of
[samples, 2].
"""
predictions = self.predict_proba(test_x)
predictions = np.around(predictions)
return predictions
def save(self, path=None):
"""Serialize model weights to HDF5.
# Arguments:
path: File path to save the model weights. Must be a .h5
file.
# Returns:
self
"""
self.model.save_weights(path)
self.path = path
return self
def load(self, path=None):
"""Loads weights into keras Model from disk.
Loads trained Sklearn Model from disk.
# Arguments:
path: File path to load saved weights from from.
# Returns:
self
"""
if path is None:
path = self.path
# load weights into new model
self.model.load_weights(path)
return self
``` |
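A minimal sketch of the SklearnModel round trip described in its docstring, assuming random placeholder data and no augmentation (datagen=None skips the augment step); the data shapes and file name are illustrative:
```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier

# Placeholder data: 20 single-channel 8x8 "images" with binary labels.
train_x = np.random.rand(20, 8, 8, 1)
train_y = np.random.randint(0, 2, size=20)

wrapper = SklearnModel(RandomForestClassifier, seed=0, n_estimators=10)
wrapper.fit(train_x, train_y)

probabilities = wrapper.predict_proba(train_x)  # shape (20, 2)
labels = wrapper.predict(train_x)               # rounded probabilities

wrapper.save("forest.pk")  # pickle the fitted estimator
wrapper.load("forest.pk")  # restore it later in the script
```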
{
"source": "josh-mattson/TCGstorageAPI",
"score": 2
} |
#### File: TCGstorageAPI/TCGstorageAPI/keymanager.py
```python
class KeyManager(object):
'''
This is a class to store authority and credentials temporarily.
'''
def __init__(self):
'''
The function to create a structure for authority and credentials.
'''
self.credentials = {}
def getKey(self,auth):
'''
The function to get the credential value for an authority.
Parameters:
auth -Authority
Returns:
cred - credential
'''
cred = self.credentials[auth]
return cred
def setKey(self,auth,cred):
'''
The function to set credential for an authority.
Parameters:
auth - Authority
cred - credential
'''
self.credentials[auth]=cred
return
``` |
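A minimal sketch of the intended usage: credentials are held in a plain dict keyed by authority, so setKey and getKey are simple store and lookup calls (the authority name and credential below are placeholders):
```python
keymanager = KeyManager()
keymanager.setKey("Admin1", "opal-credential")
assert keymanager.getKey("Admin1") == "opal-credential"
```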
{
"source": "JoshMayberry/Communication_API",
"score": 3
} |
#### File: JoshMayberry/Communication_API/API_Ethernet.py
```python
__version__ = "2.0.0"
#Import standard elements
import re
import sys
import warnings
import traceback
import subprocess
import time
#Import communication elements for talking to other devices such as printers, the internet, a raspberry pi, etc.
import select
import socket
import netaddr
import API_Com.utilities
import MyUtilities.common
import MyUtilities.threadManager
#Required Modules
##py -m pip install
# netaddr
#User Access Variables
ethernetError = socket.error
class Ethernet(API_Com.utilities.Utilities_Container, MyUtilities.threadManager.CommonFunctions):
"""A controller for a Ethernet connection.
Note: If you create a socket in a background function,
do not try to read or write to your GUI until you create and open the socket.
If you do not, the GUI will freeze up.
______________________ EXAMPLE USE ______________________
com = API_Com.build()
ethernet = com.ethernet[0]
ethernet.open("192.168.0.21")
ethernet.send("Lorem Ipsum")
ethernet.close()
_________________________________________________________
com = API_Com.build()
ethernet = com.ethernet[0]
with ethernet.open("192.168.0.21"):
ethernet.send("Lorem Ipsum")
_________________________________________________________
"""
def __init__(self, parent):
"""Defines the internal variables needed to run."""
#Initialize Inherited Modules
API_Com.utilities.Utilities_Container.__init__(self, parent)
MyUtilities.threadManager.CommonFunctions.__init__(self)
#Internal Variables
self.ipScanBlock = [] #Used to store active ip addresses from an ip scan
self.ipScanStop = False #Used to stop the ip scanning function early
self._listener_scan = None
@MyUtilities.common.makeProperty()
class listener_scan():
"""Used to scan for ip addresses."""
def getter(self):
raise FutureWarning("Get this working")
if (self._listener_scan is None):
self._listener_scan = self.threadManager.listen(self.listenStatusText, label = "API_COM.statusText", canReplace = True, allowMultiple = True, delay = statusText_delay, errorFunction = self.listenStatusText_handleError, autoStart = False)
return self._listener_scan
def setter(self, value):
raise FutureWarning("Get this working")
self._listener_scan = value
def remover(self):
raise FutureWarning("Get this working")
del self._listener_scan
def getAll(self, *, asBackground = False):
"""Returns all local area connections.
Example Input: getAll()
"""
if (not asBackground):
return self.startScanIpRange(asBackground = asBackground)
self.startScanIpRange()
finished = False
while (not finished):
valueList, finished = self.checkScanIpRange()
time.sleep(100 / 1000)
return valueList
def ping(self, address):
"""Returns True if the given ip address is online. Otherwise, it returns False.
Code modified from http://www.opentechguides.com/how-to/article/python/57/python-ping-subnet.html
address (str) - The ip address to ping
Example Input: ping("169.254.231.0")
"""
#Configure subprocess to hide the console window
info = subprocess.STARTUPINFO()
info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = subprocess.SW_HIDE
#Remove Whitespace
address = re.sub("\s", "", address)
#Ping the address
output = subprocess.Popen(['ping', '-n', '1', '-w', '500', address], stdout=subprocess.PIPE, startupinfo=info).communicate()[0]
output = output.decode("utf-8")
#Interpret Ping Results
if ("Destination host unreachable" in output):
return False #Offline
elif ("Request timed out" in output):
return False #Offline
elif ("could not find host" in output):
return False #Offline
else:
return True #Online
def startScanIpRange(self, start = None, end = None, *, asBackground = True):
"""Scans a range of ip addresses in the given range for online ones.
Because this can take some time, it saves the list of ip addresses as an internal variable.
Special thanks to lovetocode on http://stackoverflow.com/questions/4525492/python-list-of-addressable-ip-addresses
start (str) - The ip address to start at
- If None: Will use the current ip address group and start at 0
end (str) - The ip address to stop after
- If None: Will use the current ip address group and end at 255
Example Input: startScanIpRange()
Example Input: startScanIpRange("169.254.231.0", "169.254.231.24")
"""
def runFunction(self, start, end):
"""Needed to scan on a separate thread so the GUI is not tied up."""
#Remove Whitespace
start = re.sub("\s", "", start)
end = re.sub("\s", "", end)
#Get ip scan range
networkAddressSet = list(netaddr.IPRange(start, end))
#For each IP address in the subnet, run the ping command with the subprocess.popen interface
for i in range(len(networkAddressSet)):
if (self.ipScanStop):
self.ipScanStop = False
break
address = str(networkAddressSet[i])
online = self.ping(address)
#Determine if the address is desired by the user
if (online):
self.ipScanBlock.append(address)
#Mark end of message
self.ipScanBlock.append(None)
if ((start is None) or (end is None)):
# raise FutureWarning("Add code here for getting the current ip Address")
currentIp = socket.gethostbyname(socket.gethostname())
start = currentIp[:-2] + "0"
end = currentIp[:-2] + "255"
#Listen for data on a separate thread
self.ipScanBlock = []
if (asBackground):
raise FutureWarning("Get this working again")
self.backgroundRun(runFunction, [self, start, end])
else:
runFunction(self, start, end)
return self.ipScanBlock
def checkScanIpRange(self):
"""Checks for found active ip addresses from the scan.
Each read portion is an element in a list.
Returns the current block of data and whether it is finished listening or not.
Example Input: checkScanIpRange()
"""
raise FutureWarning("Get this working again")
#The entire message has been read once the last element is None.
finished = False
if (len(self.ipScanBlock) != 0):
if (self.ipScanBlock[-1] is None):
finished = True
self.ipScanBlock.pop(-1) #Remove the None from the end so the user does not get confused
return self.ipScanBlock, finished
def stopScanIpRange(self):
"""Stops listening for data from the socket.
Note: The data is still in the buffer. You can resume listening by starting startRecieve() again.
To flush it, close the socket and then open it again.
Example Input: stopScanIpRange()
"""
raise FutureWarning("Get this working again")
self.ipScanStop = True
class Child(API_Com.utilities.Utilities_Child):
"""An Ethernet connection."""
def __init__(self, parent, label):
"""Defines the internal variables needed to run."""
#Initialize Inherited Modules
API_Com.utilities.Utilities_Child.__init__(self, parent, label)
#Background thread variables
self.dataBlock = [] #Used to recieve data from the socket
self.clientDict = {} #Used to keep track of all client connections {"device": connection object (socket), "data": client dataBlock (str), "stop": stop flag (bool), "listening": currently listening flag, "finished": recieved all flag}
self.recieveStop = False #Used to stop the recieving function early
self.recieveListening = False #Used to check if the recieve function has started listening or if it has finished listening
#Create the socket
self.device = None
self.stream = None
self.address = None
self.port = None
def __exit__(self, exc_type, exc_value, traceback):
"""Allows the user to use a with statement to make sure the socket connection gets closed after use."""
self.close()
return API_Com.utilities.Utilities_Container.__exit__(self, exc_type, exc_value, traceback)
def open(self, address, port = 9100, error = False, pingCheck = False,
timeout = -1, stream = True):
"""Opens the socket connection.
address (str) - The ip address/website you are connecting to
port (int) - The socket port that is being used
error (bool) - Determines what happens if an error occurs
If True: If there is an error, returns an error indicator. Otherwise, returns a 0
If False: Raises an error exception
pingCheck (bool) - Determines if it will ping an ip address before connecting to it to confirm it exists
Example Input: open("www.example.com")
"""
if (self.device is not None):
warnings.warn(f"Socket already opened", Warning, stacklevel = 2)
#Account for the socket having been closed
# if (self.device is None):
if (stream):
self.device = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.stream = "SOCK_STREAM"
else:
self.device = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.stream = "SOCK_DGRAM"
if (timeout != -1):
self.setTimeout(timeout)
#Remove any white space
address = re.sub("\s", "", address)
#Make sure it exists
if (pingCheck):
addressExists = self.parent.ping(address)
if (not addressExists):
print(f"Cannot ping address {address}")
self.device = None
return False
#Remember Values
self.address = address
self.port = port
#Connect to the socket
if (stream):
if (error):
error = self.device.connect_ex((address, port))
return error
else:
self.device.connect((address, port))
#Finish
if (pingCheck):
return True
def close(self, now = False):
"""Closes the socket connection.
now (bool) - Determines how the socket is closed
If True: Releases the resource associated with a connection
and closes the connection immediately
If False: Releases the resource associated with a connection
but does not necessarily close the connection immediately
If None: Closes the socket without closing the underlying file descriptor
Example Input: close()
Example Input: close(True)
Example Input: close(None)
"""
if (self.device is None):
warnings.warn(f"Socket already closed", Warning, stacklevel = 2)
return
if (now is not None):
if (now):
self.restrict()
self.device.close()
else:
self.device.detach()
self.device = None
def send(self, data):
"""Sends data across the socket connection.
data (str) - What will be sent
Example Input: send("lorem")
Example Input: send(1234)
"""
#Account for numbers, lists, etc.
if ((type(data) != str) and (type(data) != bytes)):
data = str(data)
#Make sure that the data is a byte string
if (type(data) != bytes):
data = data.encode() #The .encode() is needed for python 3.4, but not for python 2.7
#Send the data
if (self.stream == "SOCK_DGRAM"):
self.device.sendto(data, (self.address, self.port))
else:
self.device.sendall(data)
# self.device.send(data)
def startRecieve(self, bufferSize = 256, scanDelay = 500):
"""Retrieves data from the socket connection.
Because this can take some time, it saves the received data as an internal variable.
Special thanks to <NAME> and david.gaarenstroom on http://code.activestate.com/recipes/577514-chek-if-a-number-is-a-power-of-two/
bufferSize (int) - The size of the receiving buffer. Should be a power of 2
Example Input: startRecieve()
Example Input: startRecieve(512)
Example Input: startRecieve(4096)
"""
def runFunction(self, bufferSize):
"""Needed to listen on a separate thread so the GUI is not tied up."""
self.recieveListening = True
#Listen
while True:
#Check for stop command
if (self.recieveStop or (self.device is None)):# or ((len(self.dataBlock) > 0) and (self.dataBlock[-1] is None))):
self.recieveStop = False
break
#Check for data to recieve
self.device.setblocking(0)
ready = select.select([self.device], [], [], 0.5)
if (not ready[0]):
#Stop listening
break
#Retrieve the block of data
data = self.device.recv(bufferSize).decode() #The .decode is needed for python 3.4, but not for python 2.7
# data, address = self.device.recvfrom(bufferSize)#.decode() #The .decode is needed for python 3.4, but not for python 2.7
#Check for end of data stream
if (len(data) < 1):
#Stop listening
break
#Save the data
self.dataBlock.append(data)
time.sleep(scanDelay / 1000)
#Mark end of message
self.dataBlock.append(None)
self.recieveListening = False
#Checks buffer size
if (not (((bufferSize & (bufferSize - 1)) == 0) and (bufferSize > 0))):
warnings.warn(f"Buffer size must be a power of 2, not {bufferSize}", Warning, stacklevel = 2)
return None
if (self.recieveListening):
warnings.warn(f"Already listening to socket", Warning, stacklevel = 2)
return None
if ((len(self.dataBlock) > 0) and (self.dataBlock[-1] is None)):
warnings.warn(f"Use checkRecieve() to take out data", Warning, stacklevel = 2)
return None
#Listen for data on a separate thread
self.dataBlock = []
raise FutureWarning("Get this working again")
self.backgroundRun(runFunction, [self, bufferSize])
def checkRecieve(self, removeNone = True):
"""Checks what the recieveing data looks like.
Each read portion is an element in a list.
Returns the current block of data and whether it is finished listening or not.
Example Input: checkRecieve()
"""
if (self.recieveStop):
print("WARNING: Recieveing has stopped")
return [], False
if (not self.recieveListening):
if (len(self.dataBlock) > 0):
if (self.dataBlock[-1] is not None):
self.startRecieve()
else:
self.startRecieve()
#The entire message has been read once the last element is None.
finished = False
compareBlock = self.dataBlock[:] #Account for changing mid-analysis
self.dataBlock = []
if (len(compareBlock) != 0):
if (compareBlock[-1] is None):
finished = True
if (removeNone):
compareBlock.pop(-1) #Remove the None from the end so the user does not get confused
data = compareBlock[:]
return data, finished
def stopRecieve(self):
"""Stops listening for data from the socket.
Note: The data is still in the buffer. You can resume listening by starting startRecieve() again.
To flush it, close the socket and then open it again.
Example Input: stopRecieve()
"""
self.recieveStop = True
#Server Side
def startServer(self, address = None, port = 10000, clients = 1, scanDelay = 500):
"""Starts a server that connects to clients.
Modified code from <NAME> on: https://pymotw.com/2/socket/tcp.html
port (int) - The port number to listen on
clients (int) - The number of clients to listen for
scanDelay (int) - How long in milliseconds between scans for clients
Example Input: startServer()
Example Input: startServer(port = 80)
Example Input: startServer(clients = 5)
"""
def runFunction():
"""Needed to listen on a separate thread so the GUI is not tied up."""
nonlocal self, address, port, clients, scanDelay
#Bind the socket to the port
if (address is None):
address = '0.0.0.0'
#Remove any white space
address = re.sub("\s", "", address)
serverIp = (socket.gethostbyname(address), port)
self.address = address
self.port = port
self.device.bind(serverIp)
#Listen for incoming connections
self.device.listen(clients)
count = clients #How many clients still need to connect
clientIp = None
while True:
# Wait for a connection
try:
connection, clientIp = self.device.accept()
except:
traceback.print_exc()
if (clientIp is not None):
count = self.closeClient(clientIp[0], count)
else:
break
#Check for all clients having connected and left
if (count <= 0):
break
if (clientIp is not None):
#Catalogue client
if (clientIp not in self.clientDict):
self.clientDict[clientIp] = {"device": connection, "data": "", "stop": False, "listening": False, "finished": False}
else:
warnings.warn(f"Client {clientIp} recieved again", Warning, stacklevel = 2)
time.sleep(scanDelay / 1000)
#Error Checking
self.device = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#Listen for data on a separate thread
raise FutureWarning("Get this working again")
self.backgroundRun(runFunction)
def clientSend(self, clientIp, data, logoff = False):
"""Sends data across the socket connection to a client.
clientIp (str) - The IP address of the client
data (str) - What will be sent
Example Input: clientSend("169.254.231.0", "lorem")
Example Input: clientSend("169.254.231.0", 1234)
"""
#Account for numbers, lists, etc.
if ((type(data) != str) and (type(data) != bytes)):
data = str(data)
#Make sure that the data is a byte string
if (type(data) != bytes):
data = data.encode() #The .encode() is needed for python 3.4, but not for python 2.7
#Send the data
client = self.clientDict[clientIp]["device"]
client.sendall(data)
# if (logoff):
# client.shutdown(socket.SHUT_WR)
def clientStartRecieve(self, clientIp, bufferSize = 256):
"""Retrieves data from the socket connection.
Because this can take some time, it saves the received data as an internal variable.
Special thanks to <NAME> and david.gaarenstroom on http://code.activestate.com/recipes/577514-chek-if-a-number-is-a-power-of-two/
clientIp (str) - The IP address of the client
bufferSize (int) - The size of the receiving buffer. Should be a power of 2
Example Input: clientStartRecieve("169.254.231.0")
Example Input: clientStartRecieve("169.254.231.0", 512)
"""
def runFunction(self, clientIp, bufferSize):
"""Needed to listen on a separate thread so the GUI is not tied up."""
#Reset client dataBlock
self.clientDict[clientIp]["data"] = ""
self.clientDict[clientIp]["finished"] = False
self.clientDict[clientIp]["listening"] = True
#Listen
client = self.clientDict[clientIp]["device"]
while True:
#Check for stop command
if (self.clientDict[clientIp]["stop"]):
self.clientDict[clientIp]["stop"] = False
break
#Retrieve the block of data
data = client.recv(bufferSize).decode() #The .decode is needed for python 3.4, but not for python 2.7
#Save the data
self.clientDict[clientIp]["data"] += data
#Check for end of data stream
if (len(data) < bufferSize):
#Stop listening
break
#Mark end of message
self.clientDict[clientIp]["finished"] = True
self.clientDict[clientIp]["listening"] = False
#Checks buffer size
if (not (((bufferSize & (bufferSize - 1)) == 0) and (bufferSize > 0))):
warnings.warn(f"Buffer size must be a power of 2, not {bufferSize}", Warning, stacklevel = 2)
return None
#Listen for data on a separate thread
self.dataBlock = []
raise FutureWarning("Get this working again")
self.backgroundRun(runFunction, [self, clientIp, bufferSize])
def clientCheckRecieve(self, clientIp):
"""Checks what the recieveing data looks like.
Each read portion is an element in a list.
Returns the current block of data and whether it is finished listening or not.
clientIp (str) - The IP address of the client
Example Input: clientCheckRecieve("169.254.231.0")
"""
if (self.clientDict[clientIp]["stop"]):
print("WARNING: Recieveing has stopped")
return [], False
if (not self.clientDict[clientIp]["listening"]):
if (len(self.clientDict[clientIp]["data"]) > 0):
if (self.clientDict[clientIp]["finished"] != True):
self.clientStartRecieve(clientIp)
else:
self.clientStartRecieve(clientIp)
#Account for changing mid-analysis
finished = self.clientDict[clientIp]["finished"]
data = self.clientDict[clientIp]["data"][:]
self.clientDict[clientIp]["data"] = ""
# if (len(compareBlock) == 0):
# finished = False
return data, finished
def clientStopRecieve(self, clientIp):
"""Stops listening for data from the client.
Note: The data is still in the buffer. You can resume listening by starting clientStartRecieve() again.
To flush it, close the client and then open it again.
clientIp (str) - The IP address of the client
Example Input: clientStopRecieve("169.254.231.0")
"""
self.clientDict[clientIp]["stop"] = True
def getClients(self):
"""Returns a list of all current client IP addresses.
Example Input: getClients()
"""
clients = list(self.clientDict.keys())
return clients
def closeClient(self, clientIp, clientsLeft = None):
"""Cleans up the connection with a server client.
clientIp (str) - The IP number of the client.
clientsLeft (int) - How many clients still need to connect
Example Input: closeClient("169.254.231.0")
"""
if (clientIp not in self.clientDict):
errorMessage = f"There is no client {clientIp} for this server"
raise ValueError(errorMessage)
else:
client = self.clientDict[clientIp]["device"]
client.close()
del(self.clientDict[clientIp])
if (clientsLeft is not None):
return clientsLeft - 1
def restrict(self, how = "rw"):
"""Restricts the data flow between the ends of the socket.
how (str) - What will be shut down
"r" - Will not allow data to be recieved
"w" - Will not allow data to be sent
"rw" - Will not allow data to be recieved or sent
"b" - Will block the data
Example Input: restrict()
Example Input: restrict("r")
"""
if (how == "rw"):
self.device.shutdown(socket.SHUT_RDWR)
elif (how == "r"):
self.device.shutdown(socket.SHUT_RD)
elif (how == "w"):
self.device.shutdown(socket.SHUT_WR)
elif (how == "b"):
self.device.setblocking(False)
else:
warnings.warn(f"Unknown restiction flag {how}", Warning, stacklevel = 2)
def unrestrict(self, how = "rw"):
"""Un-Restricts the data flow between the ends of the socket.
how (str) - What will be shut down
"r" - Will allow data to be recieved
"w" - Will allow data to be sent
"rw" - Will allow data to be recieved and sent
"b" - Will not block the data. Note: Sets the timeout to None
Example Input: unrestrict()
Example Input: unrestrict("r")
"""
if (how == "rw"):
# self.device.shutdown(socket.SHUT_RDWR)
pass
elif (how == "r"):
# self.device.shutdown(socket.SHUT_RD)
pass
elif (how == "w"):
# self.device.shutdown(socket.SHUT_WR)
pass
elif (how == "b"):
self.device.setblocking(True)
else:
warnings.warn(f"Unknown unrestiction flag {how}", Warning, stacklevel = 2)
def getTimeout(self):
"""Gets the tiemout for the socket.
By default, the timeout is None.
Example Input: getTimeout()
"""
timeout = self.device.gettimeout()
return timeout
def setTimeout(self, timeout):
"""Sets the tiemout for the socket.
By default, the timeout is None.
timeout (int) - How many seconds until timeout
If None: There is no timeout
Example Input: setTimeout(60)
"""
#Ensure that there is no negative value
if (timeout is not None):
if (timeout < 0):
warnings.warn(f"Timeout cannot be negative for setTimeout() in {self.__repr__()}", Warning, stacklevel = 2)
return
self.device.settimeout(timeout)
def getAddress(self, mine = False):
"""Returns either the socket address or the remote address.
mine (bool) - Determines which address is returned
If True: Returns the socket's address
If False: Returns the remote address
Example Input: getAddress()
"""
if (mine):
address = self.device.getsockname()
else:
address = self.device.getpeername()
return address
def isOpen(self, address = None):
"""Returns if a socket is already open."""
# error = self.device.connect_ex((address, port))
# if (error == 0):
# self.device.shutdown(2)
# return True
# return False
if (self.device is None):
return True
return False
```
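The startRecieve/checkRecieve/stopRecieve trio is built around a polling pattern: start listening on a background thread, then repeatedly drain the data block until the terminating None has been seen. The background plumbing (backgroundRun) is currently stubbed out with FutureWarning placeholders, so the following is only a sketch of the intended call sequence; the address and port are placeholders:
```python
import time

com = API_Com.build()          # as in the class docstring's example
ethernet = com.ethernet[0]

ethernet.open("192.168.0.21", port=9100)
ethernet.send("Lorem Ipsum")

ethernet.startRecieve(bufferSize=512)
finished = False
while not finished:
    data, finished = ethernet.checkRecieve()
    for chunk in data:
        print(chunk)
    time.sleep(0.1)

ethernet.close()
```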
#### File: JoshMayberry/Communication_API/controller.py
```python
__version__ = "1.1.1"
#Import standard elements
import re
import os
import sys
import warnings
import traceback
import threading
import subprocess
# #Import communication elements for talking to other devices such as printers, the internet, a raspberry pi, etc.
import usb
import types
import bisect
import select
import socket
import serial
import netaddr
import serial.tools.list_ports
# #Import barcode modules for drawing and decoding barcodes
import qrcode
import barcode
# #Import email modules for sending and viewing emails
import smtplib
import xml.etree.cElementTree as xml
from email import encoders as email_encoders
from email.mime.base import MIMEBase as email_MIMEBase
from email.mime.text import MIMEText as email_MIMEText
from email.mime.image import MIMEImage as email_MIMEImage
from email.mime.multipart import MIMEMultipart as email_MIMEMultipart
import io
import wx
import glob
import PIL
import base64
import zipfile
import collections
import API_Database as Database
import MyUtilities.common
import MyUtilities.threadManager
#Required Modules
##py -m pip install
# pyusb
# qrcode
# netaddr
# pyserial
# pyBarcode
##Module dependencies (Install the following .exe and/or .dll files)
#The latest Windows binary on "https://sourceforge.net/projects/libusb/files/libusb-1.0/libusb-1.0.21/libusb-1.0.21.7z/download"
#If on 64-bit Windows, copy "MS64\dll\libusb-1.0.dll" into "C:\windows\system32"
#If on 32-bit windows, copy "MS32\dll\libusb-1.0.dll" into "C:\windows\SysWOW64"
#User Access Variables
ethernetError = socket.error
class CommunicationManager():
"""Helps the user to communicate with other devices.
CURRENTLY SUPPORTED METHODS
- COM Port
- Ethernet & Wi-fi
- Barcode
- QR Code
- USB
- Email
UPCOMING SUPPORTED METHODS
- Raspberry Pi GPIO
Example Input: Communication()
"""
def __init__(self):
"""Initialized internal variables."""
self.ethernet = Ethernet(self)
self.barcode = Barcode(self)
self.comPort = ComPort(self)
self.email = Email(self)
self.usb = USB(self)
def __str__(self):
"""Gives diagnostic information on the GUI when it is printed out."""
output = f"Communication()\n-- id: {id(self)}\n"
output += f"-- Ethernets: {len(self.ethernet)}\n"
output += f"-- COM Ports: {len(self.comPort)}\n"
output += f"-- USB Ports: {len(self.usb)}\n"
output += f"-- Barcodes: {len(self.barcode)}\n"
output += f"-- Email Accounts: {len(self.email)}\n"
return output
def __repr__(self):
representation = f"Communication(id = {id(self)})"
return representation
def getAll(self):
"""Returns all available communication types.
Example Input: getAll()
"""
return [self.ethernet, self.comPort, self.usb, self.barcode, self.email]
rootManager = CommunicationManager()
def getEthernet(label = None, *, comManager = None):
"""Returns an ethernet handle with the given label. If it does not exist, it will make one.
label (str) - What ethernet to get
- If None: Will make a new ethernet port
Example Input: getEthernet()
Example Input: getEthernet(1)
"""
global rootManager
comManager = MyUtilities.common.ensure_default(comManager, default = rootManager)
return comManager.ethernet.add(label = label)
def getCom(label = None, *, comManager = None):
"""Returns a com port handle with the given label. If it does not exist, it will make one.
label (str) - What com port to get
- If None: Will make a new com port
Example Input: getCom()
Example Input: getCom(1)
"""
global rootManager
comManager = MyUtilities.common.ensure_default(comManager, default = rootManager)
return comManager.comPort.add(label = label)
def getUsb(label = None, *, comManager = None):
"""Returns a usb handle with the given label. If it does not exist, it will make one.
label (str) - What usb to get
- If None: Will make a new usb port
Example Input: getUsb()
Example Input: getUsb(1)
"""
global rootManager
comManager = MyUtilities.common.ensure_default(comManager, default = rootManager)
return comManager.usb.add(label = label)
def getBarcode(label = None, *, comManager = None):
"""Returns a barcode handle with the given label. If it does not exist, it will make one.
label (str) - What barcode to get
- If None: Will make a new barcode port
Example Input: getBarcode()
Example Input: getBarcode(1)
"""
global rootManager
comManager = MyUtilities.common.ensure_default(comManager, default = rootManager)
return comManager.barcode.add(label = label)
def getEmail(label = None, *, comManager = None):
"""Returns an email handle with the given label. If it does not exist, it will make one.
label (str) - What email to get
- If None: Will make a new email port
Example Input: getEmail()
Example Input: getEmail(1)
"""
global rootManager
comManager = MyUtilities.common.ensure_default(comManager, default = rootManager)
return comManager.email.add(label = label)
# def runFile():
if __name__ == '__main__':
# print(getEthernet())
# for item in rootManager.comPort.getAll():
# print(item["vendorId"], item["productId"])
# import time
# import subprocess
# subprocess.Popen(["py", "maintenance.py"]) #https://docs.python.org/2/library/os.html#os.startfile
# print("@1.2")
# time.sleep(1)
# print("@1.3")
# sys.exit()
print("Starting")
getEmail().test()
``` |
{
"source": "JoshMayberry/EXE_Generator",
"score": 2
} |
#### File: JoshMayberry/EXE_Generator/exe_cx_freeze.py
```python
import os
import sys
import glob
import stat
import shutil
import cx_Freeze
from distutils.core import setup
import MyUtilities.common
if (__name__ == "__main__"):
from utilities import Exe_Base
from utilities import Utilities
from utilities import data_files_zip
else:
from .utilities import Exe_Base
from .utilities import Utilities
from .utilities import data_files_zip
#Required Modules
##py -m pip install
# cx_Freeze
def clearDirectory(directory):
if (not os.path.exists(directory)):
return
def onerror(function, path, exc_info):
"""An Error handler for shutil.rmtree.
Modified code from <NAME> on https://stackoverflow.com/questions/2656322/shutil-rmtree-fails-on-windows-with-access-is-denied
"""
if (not os.access(path, os.W_OK)):
os.chmod(path, stat.S_IWUSR)
function(path)
else:
raise
#######################################
shutil.rmtree(directory, ignore_errors = False, onerror = onerror)
#Controllers
def build(*args, **kwargs):
return Controller(*args, **kwargs)
_srcfile = os.path.normcase(build.__code__.co_filename)
class Controller(Utilities):
def __init__(self, script = None):
"""Used to create a .exe file.
For good module structure practices, see: http://blog.habnab.it/blog/2013/07/21/python-packages-and-you/
Special thanks to <NAME> for how to detect 32 bit vs 64 bit on https://stackoverflow.com/questions/6107905/which-command-to-use-for-checking-whether-python-is-64bit-or-32bit/12057504#12057504
mainFile (str) - The name of the main .py file
Example Input: Exe_CxFreeze("runMe")
"""
super().__init__()
self.modules = set()
self.data_files = set()
self.zip_include = set()
self.zip_exclude_packages = set()
self.modules_include = set()
self.modules_exclude = set()
self.data_files_compressed = set()
self.script = script
#Properties
@MyUtilities.common.makeProperty(default = None)
class script():
"""What the .py file the .exe will run."""
def setter(self, value: str):
self._script = value
def getter(self):
return self._script
@MyUtilities.common.makeProperty(default = "MyApp")
class title():
"""What the program is called."""
def setter(self, value: str):
self._title = value
def getter(self):
return self._title
@MyUtilities.common.makeProperty(default = "runMe.exe")
class shortcut_name():
"""What the .exe file is called."""
def setter(self, value: str):
self._shortcut_name = self.ensure_filePath(value, ending = ".exe", checkExists = False)
def getter(self):
return self._shortcut_name
@MyUtilities.common.makeProperty(default = None)
class icon():
"""What icon to use for the .exe file."""
def setter(self, value: str):
if (value):
self._icon = os.path.realpath(self.ensure_filePath(value, ending = ".ico"))
else:
self._icon = None
def getter(self):
return self._icon
@MyUtilities.common.makeProperty(default = "unknown")
class version():
"""The version number for the .exe file's info."""
def setter(self, value: str):
self._version = value
def getter(self):
return self._version
@MyUtilities.common.makeProperty(default = "unknown")
class description():
"""The description for the .exe file's info."""
def setter(self, value: str):
self._description = value
def getter(self):
return self._description
@MyUtilities.common.makeProperty(default = "unknown")
class author():
"""The author for the .exe file's info."""
def setter(self, value: str):
self._author = value
def getter(self):
return self._author
@MyUtilities.common.makeProperty(default = "unknown")
class author_email():
"""The author's email for the .exe file's info."""
def setter(self, value: str):
self._author_email = value
def getter(self):
return self._author_email
@MyUtilities.common.makeProperty(default = "dist")
class destination():
"""The pathway for the destination folder.
The destination folder does not need to exist.
Warning: Any files with the same name as ones that are generated will be overwritten.
Warning: All existing files in that directory will be removed first.
"""
def setter(self, value: str):
if (os.path.isabs(value)):
self._destination = value
return
#Be relative to the file that called this function, not this module
if (__name__ == "__main__"):
frame = MyUtilities.common.getCurrentframe(exclude = MyUtilities.common._srcfile)
else:
frame = MyUtilities.common.getCurrentframe(exclude = (_srcfile, MyUtilities.common._srcfile))
self.destination = os.path.join(os.path.dirname(frame.f_code.co_filename), value)
def getter(self):
return self._destination
@MyUtilities.common.makeProperty(default = None)
class preScript():
"""A .py script that should run before the .exe does."""
def setter(self, value: str):
if (value):
self._preScript = os.path.realpath(self.ensure_filePath(value, ending = ".py"))
else:
self._preScript = None
def getter(self):
return self._preScript
@MyUtilities.common.makeProperty(default = False)
class optimized():
"""Determines how to package the files for the .exe."""
def setter(self, value: str):
self._optimized = value
def getter(self):
return self._optimized
@MyUtilities.common.makeProperty(default = True)
class include_cmd():
"""Determines if the cmd window is shown or not
- If True: The cmd window will come up, which can be used for debugging.
- If False: No cmd window will come up. Errors are logged in a .txt in the same folder as the .exe file and with the same name as the .exe
"""
def setter(self, value: str):
self._include_cmd = value
def getter(self):
return self._include_cmd
#User Functions
def excludeModule(self, moduleName):
"""Excluded the given module in the .exe.
moduleName (str) - The name of the module to include
Example Input: includeModule("numpy")
"""
self.modules_exclude.add(moduleName)
def includeModule(self, moduleName):
"""Includes the given module in the .exe.
moduleName (str) - The name of the module to include
Example Input: includeModule("pickle")
"""
self.modules.add(moduleName)
def includeZip(self, source, destination):
#Example Input: includeZip("C:/Users/jmayberry/AppData/Local/Programs/Python/Python36-32/Lib/site-packages/validator_collection/_version.py", "validator_collection/_version.pyc")
self.zip_include.add((source, destination))
def excludeZip(self, moduleName):
self.zip_exclude_packages.add(moduleName)
def includeFile(self, filePath: str, *, recursive: bool = True, folder: str = "", compressed = True):
"""Includes the given file in the .exe.
filePath (str) - What file to include
- If directory: Will include all files in that directory
- If list: Will include all files in the list
folder (str) - What folder to put the included file in
Example Input("settings.ini")
Example Input("resources", folder = "resources")
Example Input("resources/*.ico", folder = "resources")
"""
container = self.data_files
# if (compressed):
# container = self.data_files_compressed
# else:
# container = self.data_files
for _filePath in self.ensure_container(filePath):
if (os.path.isfile(_filePath)):
container.add((os.path.realpath(_filePath), folder))
continue
for item in glob.iglob(_filePath, recursive = recursive):
container.add((item, folder))
def create(self, *, freshBuildDir: bool = True):
"""Creates the .exe file
Example Input: create()
"""
def yieldKwargs_console():
nonlocal self
yield "script", self.script
if (self.include_cmd):
yield "base", None
else:
yield "base", "Win32GUI"
if (self.icon):
yield "icon", self.icon
yield "initScript", self.preScript
yield "targetName", self.shortcut_name
def yieldKwargs_options():
nonlocal self
yield "build_exe", dict(yieldKwargs_build_exe())
#See: https://cx-freeze.readthedocs.io/en/latest/distutils.html#build-exe
def yieldKwargs_build_exe():
nonlocal self
yield "build_exe", self.destination
if (self.modules):
yield "packages", list(self.modules)
if (self.modules_exclude):
yield "excludes", list(self.modules_exclude)
# if (self.modules_include):
# yield "includes", list(self.modules_include)
if (self.data_files):
yield "include_files", list(self.data_files)
if (self.zip_include):
yield "zip_includes", list(self.zip_include)
if (self.optimized):
yield "zip_include_packages", "*"
if (self.zip_exclude_packages):
yield "zip_exclude_packages", list(self.zip_exclude_packages)
else:
yield "zip_exclude_packages", ""
yield "optimize", 2
####################################
if (not self.script):
errorMessage = "Must define 'self.script' before a .exe file can be created"
raise ValueError(errorMessage)
if (freshBuildDir):
clearDirectory(self.destination)
print("Creating .exe file...")
options = dict(yieldKwargs_options())
console = dict(yieldKwargs_console())
print("Options:", options)
print("Console:", console)
if (len(sys.argv) == 1):
sys.argv.append("build")
cx_Freeze.setup(
name = self.title,
author = self.author,
version = self.version,
description = self.description,
author_email = self.author_email,
options = options,
executables = [cx_Freeze.Executable(**console)],
)
if (__name__ == "__main__"):
exe = build("test.py")
exe.create()
```
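The `create` method above only assembles keyword arguments for cx_Freeze. For reference, here is roughly what the generated call reduces to for a small project; the file names, package list, and destination folder below are placeholders, not values taken from this repository:

```python
import sys
import cx_Freeze

# cx_Freeze's setup() expects a distutils-style command; default to "build".
if len(sys.argv) == 1:
    sys.argv.append("build")

cx_Freeze.setup(
    name = "MyApp",
    version = "1.0",
    description = "Example build",
    options = {
        "build_exe": {
            "build_exe": "build_output",               # destination folder (self.destination)
            "packages": ["wx"],                        # self.modules
            "excludes": ["tkinter"],                   # self.modules_exclude
            "include_files": [("settings.ini", "")],   # self.data_files as (source, folder) pairs
            "optimize": 2,
        },
    },
    executables = [cx_Freeze.Executable("test.py", base = None, targetName = "MyApp.exe")],
)
```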
#### File: JoshMayberry/EXE_Generator/utilities.py
```python
import os
import re
import sys
import stat
import shutil
import abc
import inspect
import MyUtilities.common
data_files_zip = []
absPath = re.sub("([^\\\\])[\\\\]([^\\\\])", r"\1/\2", os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
class Utilities(MyUtilities.common.EnsureFunctions):
pass
class Exe_Base(Utilities, metaclass = abc.ABCMeta):
def __init__(self, mainFile, name = None, version = None, author = None, description = None):
"""Used to create a .exe file.
For good module structure practices, see: http://blog.habnab.it/blog/2013/07/21/python-packages-and-you/
Special thanks to <NAME> for how to detect 32 bit vs 64 bit on https://stackoverflow.com/questions/6107905/which-command-to-use-for-checking-whether-python-is-64bit-or-32bit/12057504#12057504
mainFile (str) - The name of the main .py file
Example Input: Exe_Base("runMe")
"""
self.options = {}
self.options["optimize"] = 0
self.options["excludes"] = []
self.options["includes"] = []
self.console = {}
self.console["script"] = ""
self.setInfoName(name)
self.setInfoAuthor(author)
self.setInfoVersion(version)
self.setInfoDescription(description)
self.data_files = []
self.setMain(mainFile)
def optimizeSize(self, excludeInterpreter = False, safer = False):
"""Makes the overall program smaller.
excludeInterpreter (bool) - If True: The Python interpreter will not be bundled
safer (bool) - There are some things that some computers don't have. Enable this to include those redundancies
Example Input: optimizeSize()
"""
print("Optimizing file size...")
self.options["optimize"] = 2
self.options["excludes"] = [
'_ssl',
'pyreadline',
#'locale', 'difflib', #Both of these are needed if you are importing your own modules
'doctest',
'optparse', 'pickle', 'calendar', 'pdb',
'unittest', #Exclude standard library
'numpy', #numpy is HUGE. Try to avoid it if you can
'tkinter',
]
# self.options["dll_excludes"] = ['tcl86t.dll', 'tk86t.dll'] #Tkinter
# if (not safer):
# self.options["dll_excludes"].append('msvcr71.dll') # Exclude msvcr71
def setDestination(self, filePath, freshDirectory = True):
"""Sets the destination folder for the program files.
The destination folder does not need to exist.
Warning: Any files with the same name as ones that are generated will be overwritten.
filePath (str) - The pathway for the destination folder
Example Input: setDestination("myProgram")
"""
if ((not freshDirectory) or (not os.path.exists(filePath))):
return
def onerror(function, path, exc_info):
"""An Error handler for shutil.rmtree.
Modified code from <NAME> on https://stackoverflow.com/questions/2656322/shutil-rmtree-fails-on-windows-with-access-is-denied
"""
if (not os.access(path, os.W_OK)):
os.chmod(path, stat.S_IWUSR)
function(path)
else:
raise
shutil.rmtree(filePath, ignore_errors = False, onerror = onerror)
# @abc.abstractmethod
def setName(self, *args, **kwargs):
pass
def setMain(self, fileName):
"""Sets the file path to the main .py file.
fileName (str) - The name for the main .py file
Example Input: setMain("convertToExcel")
"""
if (not fileName.endswith(".py")):
fileName += ".py"
if (not os.path.exists(fileName)):
raise FileNotFoundError(fileName)
self.console["script"] = fileName
# @abc.abstractmethod
def setIcon(self, *args, **kwargs):
pass
def setInfoName(self, name = None):
"""Sets the name for the .exe file's info
name (str) - What the app is called
Example Input: setInfoName("start")
"""
if (name is None):
self.name = "MyApp"
else:
self.name = name
def setInfoVersion(self, version = None):
"""Sets the version number for the .exe file's info
version (str) - The version number. Can be an int, float, or double.
Example Input: setInfoVersion("1.0")
"""
if (version is None):
self.version = "unknown"
else:
self.version = f"{version}"
def setInfoDescription(self, description = None):
"""Sets the icon for the .exe file's info
description (str) - What the program is meant to do
Example Input: setInfoDescription("Converts a .ias file to an excel sheet")
"""
if (description is None):
self.description = "unknown"
else:
self.description = f"{description}"
def setInfoAuthor(self, author = None):
"""Sets the author for the .exe file's info
author (str) - Who created the program
Example Input: setInfoAuthor("<NAME>")
"""
if (author is None):
self.author = "unknown"
else:
self.author = f"{author}"
def addFile(self, myInput, outputFolder = "", module = False, keepFolders = True, insideZip = False, ignore = None, zipOption = None):
"""Adds an extra file to the overall program bundle.
myInput (str) - The path to the .icon file.
If a folder is provided, all items in that folder will be added
outputFolder (str) - The name for the folder where the file will be located
module (bool) - If True: the input given is a path to a module, not a file name
keepFolders (bool) - Determines how the folder structure will be inside the added file
- If True: Will keep files in the folders they were in before
- If False: Will put all files in one folder
- If None: Will not look in sub-folders
insideZip (bool) - Determines how to include the given files if it is bundling things into library.zip
- If True: Will put them inside 'library.zip'
- If False: Will put them outside 'library.zip'
ignore (str) - If anything with this name is in the provided folder, it will ignore that object (will not look inside if it is a folder)
- Can provide a list of strings too
Example Input: addFile("resources/cte_icon3.ico", "resources")
Example Input: addFile("RP2005.dll", insideZip = True)
"""
if (ignore == None):
ignore = []
elif (not isinstance(ignore, (list, tuple))):
ignore = [ignore]
def getFullPath(filePath):
"""Returns the full path of the provided item."""
nonlocal self, ignore
if ((filePath in ignore) or (os.path.basename(filePath) in ignore)):
return None
if (".." in filePath):
return re.sub("\.\.", absPath, filePath)
return filePath
def getContents(filePath):
"""Returns the contents of the provided directory."""
nonlocal self
filePath = getFullPath(filePath)
if (filePath == None):
return []
elif (os.path.isfile(filePath)):
return [filePath]
else:
queue = []
for item in os.listdir(filePath):
queue.extend(getContents(os.path.join(filePath, item)))
return queue
################################################
# print("Adding file(s)...")
#Configure the list of documentation files
if (type(myInput) == str):
if (".." in myInput):
myInput_abs = re.sub("\.\.", absPath, myInput)
else:
myInput_abs = None
if (not module):
# print("@0", myInput)
#Determine contents
outputContents = getContents(myInput)
else:
outputContents = [myInput]
#Account for windows backslash escaping
outputContents = [re.sub("([^\\\\])[\\\\]([^\\\\])", r"\1/\2", item) for item in outputContents]
#Keep folder structure
if (keepFolders != None):
if (keepFolders):
structure = {} #{destination folder: source contents} Example: {'database': ['database/labelContent.db']}; {'C:/Users/Josh/Documents/Python/modules/API_ExcelManipulator': ['C:/Users/Josh/Documents/Python/modules/API_ExcelManipulator/controller.py', 'C:/Users/Josh/Documents/Python/modules/API_ExcelManipulator/LICENSE', 'C:/Users/Josh/Documents/Python/modules/API_ExcelManipulator/version.py', 'C:/Users/Josh/Documents/Python/modules/API_ExcelManipulator/__init__.py']}
#Discover the file structure
for item in outputContents:
if (os.path.isabs(item)):
rootItem = re.sub(f"^{myInput_abs}[/\\\\]", "", item)
rootItem = re.sub(f"^{myInput_abs}", "", rootItem)
else:
if (os.path.isfile(myInput)):
rootItem = re.sub(f"^{os.path.dirname(myInput)}[/\\\\]", "", item)
rootItem = re.sub(f"^{os.path.dirname(myInput)}", "", rootItem)
else:
rootItem = re.sub(f"^{myInput}[/\\\\]", "", item)
rootItem = re.sub(f"^{myInput}", "", rootItem)
folder = os.path.join(outputFolder, rootItem)
if (folder not in structure):
structure[folder] = []
structure[folder].append(item)
#Add file structure
for folder, contents in structure.items():
if (insideZip and self.options.get(zipOption)):
data_files_zip.append((folder, contents))
else:
self.data_files.append((folder, contents))
else:
#Add all files
if (self.options["compressed"] and insideZip):
data_files_zip.append((outputFolder, outputContents))
else:
self.data_files.append((outputFolder, outputContents))
else:
#Only add the top level
if (self.options["compressed"] and insideZip):
data_files_zip.append((outputFolder, outputContents))
else:
self.data_files.append((outputFolder, outputContents))
else:
#Modules themselves are passed instead of the path to them
outputContents = [myInput]
if (self.options["compressed"] and insideZip):
data_files_zip.append(outputContents)
else:
self.data_files.append(outputContents)
def addInclude(self, moduleList, userModule = False):
"""Adds a list of modules to the 'include this' list.
Also makes sure that any dependant modules are included
moduleList (list) - Modules to include in the overall bundle as strings
Example Input: addInclude(["PIL", "numpy"])
Example Input: addInclude("PIL")
Example Input: addInclude("modules.GUI_Maker")
"""
#Ensure correct type
if (type(moduleList) != list):
moduleList = [moduleList]
if (userModule):
#Ensure nested modules are included too
nestedModules = []
for module in moduleList:
exec(f"import {module}")
modulePath = os.path.dirname(inspect.getfile(eval(module)))
for item in inspect.getmembers(eval(module), inspect.ismodule):
if (not item[0] in sys.builtin_module_names):
nestedPath = inspect.getfile(item[1])
if (not modulePath in nestedPath):
nestedModules.append(item[1])
# nestedModules.extend([item[0] for item in inspect.getmembers(eval(module), inspect.ismodule)])
self.options["includes"].extend(nestedModules)
else:
self.options["includes"].extend(moduleList)
#Remove duplicates
self.options["includes"] = list(dict.fromkeys(self.options["includes"]))
# #Remove private modules
# self.options["includes"] = [item for item in self.options["includes"] if (item[0] != "_")]
def addExclude(self, moduleList, dll = False):
"""Adds a list of modules to the 'do not include this' list.
moduleList (list) - Modules to not include in the overall bundle as strings
dll (bool) - Determines if it is a dll exclude or not
Example Input: addExclude(["PIL", "numpy"])
Example Input: addExclude("PIL")
"""
#Ensure correct type
if (type(moduleList) == str):
moduleList = [moduleList]
if (dll):
self.options["dll_excludes"].extend(moduleList)
else:
self.options["excludes"].extend(moduleList)
@abc.abstractmethod
def create(self, *args, **kwargs):
pass
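# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): one way a concrete
# subclass might implement the abstract create() using cx_Freeze. The class
# name 'Exe_CxFreeze' and the option mapping are assumptions based on the
# sibling build script, not an API defined by this file.
class Exe_CxFreeze(Exe_Base):
    def create(self):
        import cx_Freeze
        cx_Freeze.setup(
            name = self.name,
            author = self.author,
            version = self.version,
            description = self.description,
            options = {"build_exe": self.options},
            executables = [cx_Freeze.Executable(**self.console)],
        )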
``` |
{
"source": "JoshMayberry/LogScraper",
"score": 3
} |
#### File: JoshMayberry/LogScraper/runMe.py
```python
import os
import sys
def main():
"""The first function that should run."""
preventClose = True
try:
import controller
controller.main()
except SystemExit:
preventClose = False
raise
except:
import traceback
#Display what happened
traceback.print_exc()
# if (restart(cmdArgs)):
# preventClose = False
# # os.execl(sys.executable, sys.executable, *sys.argv)
finally:
import time
#Keep the cmd window from closing
if (preventClose):
while True:
time.sleep(1)
if (__name__ == "__main__"):
main()
``` |
{
"source": "JoshMayberry/ME342Final",
"score": 2
} |
#### File: ME342Final/docs/title_page.py.py
```python
import wx
import wx.xrc
###########################################################################
## Class Frm_Subject
###########################################################################
class Frm_Subject ( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"Problem Solver", pos = wx.DefaultPosition, size = wx.Size( 345,135 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )
sz_Subject = wx.BoxSizer( wx.VERTICAL )
self.txt_subject = wx.StaticText( self, wx.ID_ANY, u"Choose Your Subject", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_subject.Wrap( -1 )
self.txt_subject.SetFont( wx.Font( 25, 70, 90, 90, True, wx.EmptyString ) )
self.txt_subject.SetForegroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOWTEXT ) )
sz_Subject.Add( self.txt_subject, 0, wx.ALL, 5 )
sz_btns = wx.FlexGridSizer( 0, 2, 0, 0 )
sz_btns.SetFlexibleDirection( wx.BOTH )
sz_btns.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.btn_Thermo = wx.Button( self, wx.ID_ANY, u"Thermodynamics", wx.DefaultPosition, wx.DefaultSize, 0 )
self.btn_Thermo.SetDefault()
sz_btns.Add( self.btn_Thermo, 0, wx.ALL, 5 )
sz_Subject.Add( sz_btns, 1, wx.EXPAND, 5 )
self.SetSizer( sz_Subject )
self.Layout()
self.Centre( wx.BOTH )
# Connect Events
self.btn_Thermo.Bind( wx.EVT_BUTTON, self.onBtnClick_ContinueToSetup )
def __del__( self ):
pass
# Virtual event handlers, override them in your derived class
def onBtnClick_ContinueToSetup( self, event ):
event.Skip()
###########################################################################
## Class Frm_ThermoSetup
###########################################################################
class Frm_ThermoSetup ( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = wx.EmptyString, pos = wx.DefaultPosition, size = wx.Size( 304,307 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )
sz_ThermoSetup = wx.BoxSizer( wx.VERTICAL )
self.tit_TS_Setup = wx.StaticText( self, wx.ID_ANY, u"Thermodynamics", wx.DefaultPosition, wx.DefaultSize, 0 )
self.tit_TS_Setup.Wrap( -1 )
sz_ThermoSetup.Add( self.tit_TS_Setup, 0, wx.ALL|wx.EXPAND, 5 )
sz_TS_Setups = wx.FlexGridSizer( 0, 3, 0, 0 )
sz_TS_Setups.SetFlexibleDirection( wx.BOTH )
sz_TS_Setups.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
sz_TS_Medium = wx.FlexGridSizer( 0, 1, 0, 0 )
sz_TS_Medium.SetFlexibleDirection( wx.BOTH )
sz_TS_Medium.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.txt_TS_Medium = wx.StaticText( self, wx.ID_ANY, u"Medium", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TS_Medium.Wrap( -1 )
sz_TS_Medium.Add( self.txt_TS_Medium, 0, wx.ALL, 5 )
self.btn_TS_Medium1 = wx.RadioButton( self, wx.ID_ANY, u"Ideal Gas", wx.DefaultPosition, wx.DefaultSize, wx.RB_GROUP )
self.btn_TS_Medium1.SetValue( True )
sz_TS_Medium.Add( self.btn_TS_Medium1, 0, wx.ALL, 5 )
self.btn_TS_Medium2 = wx.RadioButton( self, wx.ID_ANY, u"Water", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TS_Medium.Add( self.btn_TS_Medium2, 0, wx.ALL, 5 )
self.btn_TS_Medium3 = wx.RadioButton( self, wx.ID_ANY, u"R-134a", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TS_Medium.Add( self.btn_TS_Medium3, 0, wx.ALL, 5 )
self.txt_TS_System = wx.StaticText( self, wx.ID_ANY, u"System", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TS_System.Wrap( -1 )
sz_TS_Medium.Add( self.txt_TS_System, 0, wx.ALL, 5 )
self.btn_TS_System1 = wx.RadioButton( self, wx.ID_ANY, u"Closed", wx.DefaultPosition, wx.DefaultSize, wx.RB_GROUP )
self.btn_TS_System1.SetValue( True )
sz_TS_Medium.Add( self.btn_TS_System1, 0, wx.ALL, 5 )
self.btn_TS_System2 = wx.RadioButton( self, wx.ID_ANY, u"Steady", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TS_Medium.Add( self.btn_TS_System2, 0, wx.ALL, 5 )
self.btn_TS_System3 = wx.RadioButton( self, wx.ID_ANY, u"Unsteady", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TS_Medium.Add( self.btn_TS_System3, 0, wx.ALL, 5 )
sz_TS_Setups.Add( sz_TS_Medium, 1, wx.EXPAND, 5 )
sz_TS_Container = wx.FlexGridSizer( 0, 1, 0, 0 )
sz_TS_Container.SetFlexibleDirection( wx.BOTH )
sz_TS_Container.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.txt_TS_Container = wx.StaticText( self, wx.ID_ANY, u"Container", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TS_Container.Wrap( -1 )
sz_TS_Container.Add( self.txt_TS_Container, 0, wx.ALL, 5 )
self.btn_TS_Container1 = wx.RadioButton( self, wx.ID_ANY, u"Rigid", wx.DefaultPosition, wx.DefaultSize, wx.RB_GROUP )
self.btn_TS_Container1.SetValue( True )
sz_TS_Container.Add( self.btn_TS_Container1, 0, wx.ALL, 5 )
self.btn_TS_Container2 = wx.RadioButton( self, wx.ID_ANY, u"Piston", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TS_Container.Add( self.btn_TS_Container2, 0, wx.ALL, 5 )
self.btn_TS_Container3 = wx.RadioButton( self, wx.ID_ANY, u"Membrane", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TS_Container.Add( self.btn_TS_Container3, 0, wx.ALL, 5 )
self.btn_TS_Container4 = wx.RadioButton( self, wx.ID_ANY, u"Nozzle", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TS_Container.Add( self.btn_TS_Container4, 0, wx.ALL, 5 )
self.btn_TS_Container5 = wx.RadioButton( self, wx.ID_ANY, u"Turbine", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TS_Container.Add( self.btn_TS_Container5, 0, wx.ALL, 5 )
self.btn_TS_Container6 = wx.RadioButton( self, wx.ID_ANY, u"Heat Exchanger", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TS_Container.Add( self.btn_TS_Container6, 0, wx.ALL, 5 )
self.btn_TS_Container7 = wx.RadioButton( self, wx.ID_ANY, u"Mixing Chamber", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TS_Container.Add( self.btn_TS_Container7, 0, wx.ALL, 5 )
sz_TS_Setups.Add( sz_TS_Container, 1, wx.EXPAND, 5 )
sz_TI_Etc = wx.FlexGridSizer( 0, 1, 0, 0 )
sz_TI_Etc.SetFlexibleDirection( wx.BOTH )
sz_TI_Etc.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.txt_TS_Etc = wx.StaticText( self, wx.ID_ANY, u"Etc", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TS_Etc.Wrap( -1 )
sz_TI_Etc.Add( self.txt_TS_Etc, 0, wx.ALL, 5 )
self.btn_TS_Adiabadic = wx.CheckBox( self, wx.ID_ANY, u"Adiabatic", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TI_Etc.Add( self.btn_TS_Adiabadic, 0, wx.ALL, 5 )
self.btn_TS_Isothermal = wx.CheckBox( self, wx.ID_ANY, u"Isothermal", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TI_Etc.Add( self.btn_TS_Isothermal, 0, wx.ALL, 5 )
self.btn_TS_Reversable = wx.CheckBox( self, wx.ID_ANY, u"Reversible", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TI_Etc.Add( self.btn_TS_Reversable, 0, wx.ALL, 5 )
self.btn_TS_Polytropic = wx.CheckBox( self, wx.ID_ANY, u"Polytropic", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TI_Etc.Add( self.btn_TS_Polytropic, 0, wx.ALL, 5 )
self.btn_TS_Valve = wx.CheckBox( self, wx.ID_ANY, u"Valve", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TI_Etc.Add( self.btn_TS_Valve, 0, wx.ALL, 5 )
self.txt_TS_Units = wx.StaticText( self, wx.ID_ANY, u"Units", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TS_Units.Wrap( -1 )
sz_TI_Etc.Add( self.txt_TS_Units, 0, wx.ALL, 5 )
units_TS_ChooseChoices = []
self.units_TS_Choose = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 80,-1 ), units_TS_ChooseChoices, 0 )
self.units_TS_Choose.SetSelection( 0 )
sz_TI_Etc.Add( self.units_TS_Choose, 0, wx.ALL, 5 )
sz_TS_Setups.Add( sz_TI_Etc, 1, wx.EXPAND, 5 )
sz_ThermoSetup.Add( sz_TS_Setups, 1, wx.EXPAND, 5 )
self.btn_TS_Continue = wx.Button( self, wx.ID_ANY, u"Continue", wx.DefaultPosition, wx.DefaultSize, 0 )
self.btn_TS_Continue.SetDefault()
sz_ThermoSetup.Add( self.btn_TS_Continue, 0, wx.ALL, 5 )
self.SetSizer( sz_ThermoSetup )
self.Layout()
self.Centre( wx.BOTH )
# Connect Events
self.btn_TS_Medium1.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_Medium_IdealGas )
self.btn_TS_Medium2.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_Medium_Water )
self.btn_TS_Medium3.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_Medium_R132 )
self.btn_TS_System1.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_System_Closed )
self.btn_TS_System2.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_System_Steady )
self.btn_TS_System3.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_System_Unsteady )
self.btn_TS_Container1.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_Container_Rigid )
self.btn_TS_Container2.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_Container_Piston )
self.btn_TS_Container3.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_Container_Membrane )
self.btn_TS_Container4.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_Container_Nozzle )
self.btn_TS_Container5.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_Container_Turbine )
self.btn_TS_Container6.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_Container_HeatExch )
self.btn_TS_Container7.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_Container_Mixing )
self.btn_TS_Adiabadic.Bind( wx.EVT_CHECKBOX, self.onBtnClick_Etc_Adiabadic )
self.btn_TS_Isothermal.Bind( wx.EVT_CHECKBOX, self.onBtnClick_Etc_Isothermal )
self.btn_TS_Reversable.Bind( wx.EVT_CHECKBOX, self.onBtnClick_Etc_Reversable )
self.btn_TS_Polytropic.Bind( wx.EVT_CHECKBOX, self.onBtnClick_Etc_Polytropic )
self.btn_TS_Valve.Bind( wx.EVT_CHECKBOX, self.onBtnClick_Etc_Valve )
self.units_TS_Choose.Bind( wx.EVT_CHOICE, self.onUnits_TS_Choice )
self.btn_TS_Continue.Bind( wx.EVT_BUTTON, self.onBtnClick_ContinueToInput )
def __del__( self ):
pass
# Virtual event handlers, override them in your derived class
def onBtnClick_Medium_IdealGas( self, event ):
event.Skip()
def onBtnClick_Medium_Water( self, event ):
event.Skip()
def onBtnClick_Medium_R132( self, event ):
event.Skip()
def onBtnClick_System_Closed( self, event ):
event.Skip()
def onBtnClick_System_Steady( self, event ):
event.Skip()
def onBtnClick_System_Unsteady( self, event ):
event.Skip()
def onBtnClick_Container_Rigid( self, event ):
event.Skip()
def onBtnClick_Container_Piston( self, event ):
event.Skip()
def onBtnClick_Container_Membrane( self, event ):
event.Skip()
def onBtnClick_Container_Nozzle( self, event ):
event.Skip()
def onBtnClick_Container_Turbine( self, event ):
event.Skip()
def onBtnClick_Container_HeatExch( self, event ):
event.Skip()
def onBtnClick_Container_Mixing( self, event ):
event.Skip()
def onBtnClick_Etc_Adiabadic( self, event ):
event.Skip()
def onBtnClick_Etc_Isothermal( self, event ):
event.Skip()
def onBtnClick_Etc_Reversable( self, event ):
event.Skip()
def onBtnClick_Etc_Polytropic( self, event ):
event.Skip()
def onBtnClick_Etc_Valve( self, event ):
event.Skip()
def onUnits_TS_Choice( self, event ):
event.Skip()
def onBtnClick_ContinueToInput( self, event ):
event.Skip()
###########################################################################
## Class Frm_ThermoInput
###########################################################################
class Frm_ThermoInput ( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = wx.EmptyString, pos = wx.DefaultPosition, size = wx.Size( 856,300 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )
siz_ThermoInput_Title = wx.BoxSizer( wx.VERTICAL )
self.tit_TI_Input = wx.StaticText( self, wx.ID_ANY, u"Inputs", wx.DefaultPosition, wx.DefaultSize, 0 )
self.tit_TI_Input.Wrap( -1 )
siz_ThermoInput_Title.Add( self.tit_TI_Input, 0, wx.ALL, 5 )
sz_ThermoInput_Inputs = wx.FlexGridSizer( 0, 3, 0, 0 )
sz_ThermoInput_Inputs.SetFlexibleDirection( wx.BOTH )
sz_ThermoInput_Inputs.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
sz_TI_State1 = wx.FlexGridSizer( 0, 3, 0, 0 )
sz_TI_State1.SetFlexibleDirection( wx.BOTH )
sz_TI_State1.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.txt_TI_State1 = wx.StaticText( self, wx.ID_ANY, u"State 1", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_State1.Wrap( -1 )
sz_TI_State1.Add( self.txt_TI_State1, 0, wx.ALL, 5 )
self.txt_TI_spacer11 = wx.StaticText( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_spacer11.Wrap( -1 )
sz_TI_State1.Add( self.txt_TI_spacer11, 0, wx.ALL, 5 )
self.txt_TI_spacer12 = wx.StaticText( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_spacer12.Wrap( -1 )
sz_TI_State1.Add( self.txt_TI_spacer12, 0, wx.ALL, 5 )
self.txt_TI_P1 = wx.StaticText( self, wx.ID_ANY, u"P1", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_P1.Wrap( -1 )
sz_TI_State1.Add( self.txt_TI_P1, 0, wx.ALL, 5 )
self.val_TI_P1 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,-1 ), 0 )
sz_TI_State1.Add( self.val_TI_P1, 0, wx.ALL, 5 )
unit_TI_P1Choices = []
self.unit_TI_P1 = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 60,-1 ), unit_TI_P1Choices, 0 )
self.unit_TI_P1.SetSelection( 0 )
sz_TI_State1.Add( self.unit_TI_P1, 0, wx.ALL, 5 )
self.txt_TI_V1 = wx.StaticText( self, wx.ID_ANY, u"V1", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_V1.Wrap( -1 )
sz_TI_State1.Add( self.txt_TI_V1, 0, wx.ALL, 5 )
self.val_TI_V1 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,-1 ), 0 )
sz_TI_State1.Add( self.val_TI_V1, 0, wx.ALL, 5 )
unit_TI_V1Choices = []
self.unit_TI_V1 = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 60,-1 ), unit_TI_V1Choices, 0 )
self.unit_TI_V1.SetSelection( 0 )
sz_TI_State1.Add( self.unit_TI_V1, 0, wx.ALL, 5 )
sz_ThermoInput_Inputs.Add( sz_TI_State1, 1, wx.EXPAND, 5 )
sz_TI_State2 = wx.FlexGridSizer( 0, 3, 0, 0 )
sz_TI_State2.SetFlexibleDirection( wx.BOTH )
sz_TI_State2.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.txt_TI_State2 = wx.StaticText( self, wx.ID_ANY, u"State 2", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_State2.Wrap( -1 )
sz_TI_State2.Add( self.txt_TI_State2, 0, wx.ALL, 5 )
self.txt_TI_spacer21 = wx.StaticText( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_spacer21.Wrap( -1 )
sz_TI_State2.Add( self.txt_TI_spacer21, 0, wx.ALL, 5 )
self.txt_TI_spacer22 = wx.StaticText( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_spacer22.Wrap( -1 )
sz_TI_State2.Add( self.txt_TI_spacer22, 0, wx.ALL, 5 )
self.txt_TI_P2 = wx.StaticText( self, wx.ID_ANY, u"P2", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_P2.Wrap( -1 )
sz_TI_State2.Add( self.txt_TI_P2, 0, wx.ALL, 5 )
self.val_TI_P2 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,-1 ), 0 )
sz_TI_State2.Add( self.val_TI_P2, 0, wx.ALL, 5 )
unit_TI_P2Choices = []
self.unit_TI_P2 = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 60,-1 ), unit_TI_P2Choices, 0 )
self.unit_TI_P2.SetSelection( 0 )
sz_TI_State2.Add( self.unit_TI_P2, 0, wx.ALL, 5 )
self.txt_TI_V2 = wx.StaticText( self, wx.ID_ANY, u"V2", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_V2.Wrap( -1 )
sz_TI_State2.Add( self.txt_TI_V2, 0, wx.ALL, 5 )
self.val_TI_V2 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,-1 ), 0 )
sz_TI_State2.Add( self.val_TI_V2, 0, wx.ALL, 5 )
m_choice4Choices = []
self.m_choice4 = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 60,-1 ), m_choice4Choices, 0 )
self.m_choice4.SetSelection( 0 )
sz_TI_State2.Add( self.m_choice4, 0, wx.ALL, 5 )
sz_ThermoInput_Inputs.Add( sz_TI_State2, 1, wx.EXPAND, 5 )
sz_TI_OtherMain = wx.FlexGridSizer( 0, 1, 0, 0 )
sz_TI_OtherMain.SetFlexibleDirection( wx.BOTH )
sz_TI_OtherMain.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
sz_TI_OtherTitle = wx.FlexGridSizer( 0, 2, 0, 0 )
sz_TI_OtherTitle.SetFlexibleDirection( wx.BOTH )
sz_TI_OtherTitle.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.txt_TI_Other = wx.StaticText( self, wx.ID_ANY, u"Other", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_Other.Wrap( -1 )
sz_TI_OtherTitle.Add( self.txt_TI_Other, 0, wx.ALL, 5 )
self.m_staticText24 = wx.StaticText( self, wx.ID_ANY, u" ", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText24.Wrap( -1 )
sz_TI_OtherTitle.Add( self.m_staticText24, 0, wx.ALL, 5 )
self.m_staticText25 = wx.StaticText( self, wx.ID_ANY, u"MyLabel", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText25.Wrap( -1 )
sz_TI_OtherTitle.Add( self.m_staticText25, 0, wx.ALL, 5 )
m_choice8Choices = []
self.m_choice8 = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_choice8Choices, 0 )
self.m_choice8.SetSelection( 0 )
sz_TI_OtherTitle.Add( self.m_choice8, 0, wx.ALL, 5 )
sz_TI_OtherMain.Add( sz_TI_OtherTitle, 1, wx.EXPAND, 5 )
sz_TI_Other = wx.FlexGridSizer( 0, 3, 0, 0 )
sz_TI_Other.SetFlexibleDirection( wx.BOTH )
sz_TI_Other.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.txt_TI_W = wx.StaticText( self, wx.ID_ANY, u"W", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_W.Wrap( -1 )
sz_TI_Other.Add( self.txt_TI_W, 0, wx.ALL, 5 )
self.val_TI_W = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,-1 ), 0 )
sz_TI_Other.Add( self.val_TI_W, 0, wx.ALL, 5 )
m_choice5Choices = []
self.m_choice5 = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 60,-1 ), m_choice5Choices, 0 )
self.m_choice5.SetSelection( 0 )
sz_TI_Other.Add( self.m_choice5, 0, wx.ALL, 5 )
self.txt_TI_Q = wx.StaticText( self, wx.ID_ANY, u"Q", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_Q.Wrap( -1 )
sz_TI_Other.Add( self.txt_TI_Q, 0, wx.ALL, 5 )
self.val_TI_Q = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,-1 ), 0 )
sz_TI_Other.Add( self.val_TI_Q, 0, wx.ALL, 5 )
m_choice6Choices = []
self.m_choice6 = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 60,-1 ), m_choice6Choices, 0 )
self.m_choice6.SetSelection( 0 )
sz_TI_Other.Add( self.m_choice6, 0, wx.ALL, 5 )
sz_TI_OtherMain.Add( sz_TI_Other, 1, wx.EXPAND, 5 )
sz_ThermoInput_Inputs.Add( sz_TI_OtherMain, 1, wx.EXPAND, 5 )
siz_ThermoInput_Title.Add( sz_ThermoInput_Inputs, 1, wx.EXPAND, 5 )
self.btn_TI_Continue = wx.Button( self, wx.ID_ANY, u"Continue", wx.DefaultPosition, wx.DefaultSize, 0 )
siz_ThermoInput_Title.Add( self.btn_TI_Continue, 0, wx.ALL, 5 )
self.SetSizer( siz_ThermoInput_Title )
self.Layout()
self.Centre( wx.BOTH )
# Connect Events
self.val_TI_P1.Bind( wx.EVT_TEXT_ENTER, self.onVal_TI_P1 )
self.unit_TI_P1.Bind( wx.EVT_CHOICE, self.onUnit_Chose )
self.val_TI_V1.Bind( wx.EVT_TEXT_ENTER, self.onVal_TI_V1 )
self.val_TI_P2.Bind( wx.EVT_TEXT_ENTER, self.onVal_TI_P2 )
self.val_TI_V2.Bind( wx.EVT_TEXT_ENTER, self.onVal_TI_V2 )
self.val_TI_W.Bind( wx.EVT_TEXT_ENTER, self.onVal_TI_W )
self.val_TI_Q.Bind( wx.EVT_TEXT_ENTER, self.onVal_TI_Q )
self.btn_TI_Continue.Bind( wx.EVT_BUTTON, self.onBtnClick_ContinueToResults )
def __del__( self ):
pass
# Virtual event handlers, override them in your derived class
def onVal_TI_P1( self, event ):
event.Skip()
def onUnit_Chose( self, event ):
event.Skip()
def onVal_TI_V1( self, event ):
event.Skip()
def onVal_TI_P2( self, event ):
event.Skip()
def onVal_TI_V2( self, event ):
event.Skip()
def onVal_TI_W( self, event ):
event.Skip()
def onVal_TI_Q( self, event ):
event.Skip()
def onBtnClick_ContinueToResults( self, event ):
event.Skip()
```
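These wxFormBuilder-generated frames are meant to be subclassed rather than edited; the empty `event.Skip()` handlers above are the hooks. A rough sketch of that pattern, assuming the generated module is importable as `title_page` (the subclass itself is hypothetical):

```python
import wx
from title_page import Frm_Subject  # assumed module name for the generated code above

class SubjectFrame(Frm_Subject):
    # Override the virtual handler instead of editing the generated class.
    def onBtnClick_ContinueToSetup(self, event):
        print("Thermodynamics selected")
        event.Skip()

if __name__ == "__main__":
    app = wx.App(False)
    SubjectFrame(None).Show()
    app.MainLoop()
```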
#### File: source/logic/logicCalculator.py
```python
import wx
import sys #Used by gauss() when the iteration diverges
import numpy as np #For the argmax() functionality
import math
import copy #This is needed because sometimes the temp lists modify their parent lists. So, deep copies are made.
class LogicCalculator:
"""
This is the main class for the calculator function.
First, it determines what functions to use by seeing what variables you gave it.
From those variables, it can determine which equations are available.
New variables can be gained by solving simple equations. This grants access to new equations to use.
Equations with one unknown are solved using a root finder.
Multiple equations with multiple unknowns are solved using Gauss-Seidel.
Once the equations have been solved, it returns all the information it found. This is displayed on a new screen.
Pseudocode:
- What have I been given? What do I need to find?
- What equations contain what I need to find?
- Which of those equations include the variables I have been given?
- What equations can I use to get the variables that I still need?
Note: Up to this point, no equations have been run; this is just an analysis of what variables they contain.
- Solve the equations, and return your answer.
"""
def __init__(self,*args,**kwargs):
"""
'subject' is what subject you are solving for. It is a string.
'args' is [subject, goal] (plus a medium entry for thermo subjects). There can be multiple goals, because goal is a dictionary.
'kwargs' is a dictionary containing all the variables, known and unknown.
"""
print('Begin Calculation')
self = LogicCalculator
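#Note: 'self' is rebound to the class itself, so the helper methods below are plain functions that must be handed 'self' explicitly (e.g. self.solver(self, ...))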
#What have I been given?
self.subject = args[0][0]
print('\nsubject: ',self.subject)
#What is known? What is not known?
self.unknown,self.known = {},{} #Blank dictionaries
for i in kwargs.items():
if '@' not in i:
if ('unknown' in i) or ('' in i):
if i[0][0] != 'U': self.unknown.update({i[0]:i[1]})
else:
if i[0][0] != 'U': self.known.update({i[0]:i[1]})
#For now, we will not worry about these.
self.unknown.update({'MM':'unknown','R':'unknown','Tcr':'unknown','Pcr':'unknown','Vcr':'unknown'})
print('\nknown: ',self.known,'\nunknown: ',self.unknown)
#What do I need to find?
self.goal = args[1]
print('\ngoal: ',self.goal)
for element in ['known','unknown','goal']: #Set the values in the system
for item in getattr(self,element).items():
setattr(self,item[0],item[1])
#Equations
##Retrieve the correct Equation Database & other things pertaining to the subject.
print('Loading Database')
if self.subject == 'thermo':
from .logicThermoEquations import LogicThermoEquations
from .logicThermoTableLookup import TableUtilities
self.medium = args[2]
constants = LogicThermoEquations.constants()
for item in constants.items():
setattr(self,item[0],item[1]) #Record the constants
self.eqnDatabase = LogicThermoEquations.eqnDatabase(self)
print(' ~ Thermo Database Loaded')
table_utils = TableUtilities()
##Lookup all unknown values that can be gotten from the Thermo Tables
if self.medium in ['Water', 'R134a']:
pass #Get this working
else:
if 'MM' in self.unknown:
self.MM = table_utils.TableDecider(['A1',['Molar Mass',self.medium,'N/A']])
if 'R' in self.unknown:
self.R = table_utils.TableDecider(['A1',['Gas Constant R',self.medium,'N/A']])
if 'Tcr' in self.unknown:
self.Tcr = table_utils.TableDecider(['A1',['Critical Temperature',self.medium,'N/A']])
if 'Pcr' in self.unknown:
self.Pcr = table_utils.TableDecider(['A1',['Critical Pressure',self.medium,'N/A']])
if 'Vcr' in self.unknown:
self.Vcr = table_utils.TableDecider(['A1',['Critical Volume',self.medium,'N/A']])
for element in ['MM','R','Tcr','Pcr','Vcr']:
if element in self.unknown:
del self.unknown[element] #Remove found values from the unknown list
self.known.update({element:getattr(self,element)}) #Add found values to the known list
self.interThermoPassed = False #This will be true once it has been able to do a full table lookup.
self.intermediateStep(self)
# elif self.subject == 'statics': #This is to show how to add another subject.
# from .logicStaticsEquations import LogicStaticsEquations
# self.eqnDatabase = LogicStaticsEquations.eqnDatabase(self)
##Search through that Database for the relevant equations
temp = self.unknownGoalSetup(self)
self.equationFinder(self,temp)
#Solve the equations
self.solver(self,self.equations) #Find any others that can be found
#Return your answer
#Have it go through the answers and return only the ones that we want.
def intermediateStep(self):
"""
This does things that must be checked between steps of gauss sidel or linear solver.
For thermo, this is a check if the values of the tables can be/have been found.
Once self.interThermoPassed == True, then this doesn't run any more, because all values have been gotten from the tables.
"""
if self.subject == 'thermo':
if self.interThermoPassed == False:
from .logicThermoTableLookup import TableUtilities #Re-imported here; the import in __init__ is local to that method
table_utils = TableUtilities()
answer = table_utils.TableEnough(self.unknown, self.known, self.medium)
if answer != []: #There was enough
self.interThermoPassed = True
for element in answer:
setattr(self,element[0],element[1])
del self.unknown[element[0]]
self.known.update({element[0]:element[1]})
def eqnDatabaseContains(self,varsSearch,varsAfter):
"""
This simply searches for what equations in the database contain the given variables.
It then chooses the best one.
'varsSearch' is the variables to search for
'varsAfter' is the variables contained in the equation after the one before this new one.
"""
possibleEqns,possibleNames = {},[]
for var in varsSearch.items(): #Look at each variable in turn
# if var[1] != 0: #Don't waste time on the zero value variables (Remove this part?)
for eqn in self.eqnDatabase.items():
for after in varsAfter:
if (var[0] in eqn[1]) and (after in eqn[1]): #If it contains something I am looking for & a var from after
possibleEqns.update({eqn[0]:eqn[1]}) #Add that equation to possibleEqns
possibleNames.append(eqn[0])
if possibleEqns == {}: #There was nothing that fit the joint criteria above, just focus on the varsSearch.
for var in varsSearch.items(): #Look at each variable in turn
for eqn in self.eqnDatabase.items():
if var[0] in eqn[1]: #If it contains something I am looking for
possibleEqns.update({eqn[0]:eqn[1]}) #Add that equation to possibleEqns
possibleNames.append(eqn[0])
#Analyze each one.
countList = [[],[]]
self.varsAfter = varsAfter
for eqn in possibleEqns.items(): #Look at each possible equation in turn
nameList = ['unknown','varsAfter']
for i in range(2):
count = 0
for var in getattr(self,nameList[i]).keys(): #How many of each does it have?
if var in eqn[1]: count +=1
countList[i].append(count)
##Best: Has the most varsAfter. If no varsAfter: Has the least unknowns, but at least 2
indexList = [np.array(countList[0]).argmax(),np.array(countList[1]).argmin()]#Most varsAfter or least unknown
if countList[1][indexList[1]] > 0: self.myEqns.update({possibleNames[indexList[1]]:self.eqnDatabase[possibleNames[indexList[1]]]})
else: self.myEqns.update({possibleNames[indexList[0]]:self.eqnDatabase[possibleNames[indexList[0]]]})
def unknownGoalSetup(self):
"""
This creates a list of all the unknowns, with the goal tagged onto the end.
"""
temp = []
for item in self.unknown.items():
temp.append(item[0])
for item in self.goal.items():
temp.append(item[0])
return temp
def equationFinder(self,unknownList):
"""
This searches through the equation database for which equations can be used.
It then follows every pathway possible to get to the goal, and puts it in a dictionary.
The pathways that do not work are deleted from the dictionary.
It returns the equations with the variable you solve it for as a list that is in the order to be solved.
Limitations: This does not set up to solve 2 equations with 2 unknowns.
It sets up to solve 1 equation with 1 unknown, then another equation with 2 unknowns,
where one of the unknowns happens to be the same as the one that was just solved for.
"""
print('Searching the database')
n = (len(self.eqnDatabase))
col,row = -1,-1
earlyPaths = {} #These are the pathways that were completed before all unknowns were used up.
temp = {} #A dictionary of all the ways
wayCount = 0 #How many ways there are to solve it
myMatrix = [[],[[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]]]
###This is still under development.
#Input
known = {'T1': 453.15, 'P1': 130.0, 'V1': 0.07, 'P2': 80.0}
unknown = {'MM': 'unknown', 'x1': 'unknown', 'Cp': 'unknown', 'u2': 'unknown', 's1': 'unknown', 'V2': 'unknown', 'x2': 'unknown', 'R': 'unknown', 's2': 'unknown', 'v1': '', 'Q': 'unknown', 'v2': 'unknown', 'Pcr': 'unknown', 'T2': 'unknown', 'Cv': 'unknown', 'k': 'unknown', 'Cavg': 'unknown', 'Tcr': 'unknown', 'm2': 'unknown', 'roe': 'unknown', 'u1': 'unknown', 'Vcr': 'unknown', 'm1': 'unknown'}
goal = {'W': 'unknown'}
n = (len(self.eqnDatabase))
col,row = 0,0
earlyPaths = {} #These are the pathways that were completed before all unknowns were used up.
paths = {} #A dictionary of all the ways
wayCount = 0 #How many ways there are to solve it
myMatrix = [[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]]
for endVar in goal.items(): #First, label which variable is for which column
for var in unknown.items():
myMatrix[0].append(var[0])
myMatrix[0].append(endVar[0])
for eqn in self.eqnDatabase.items(): #Second, label which equation is for which row
row += 1
myMatrix[row].append(eqn[0])
for item in unknown.items(): #Fill in the 1 and 0
if item[0] in eqn[1]:
myMatrix[row].append(1)
else:
myMatrix[row].append(0)
row = 0
for eqn in myMatrix[1:]: #Third, have it search for the 1 step solutions & non-useable equations.
row += 1
print(eqn[1:])
if sum(eqn[1:]) == 0: #Weed out the eqns that are pure 0
print(' =0')
del myMatrix[row]
row -= 1
if (1 not in eqn[1:-1]) and (eqn[-1] == 1): #Delete all that contain only my goal & store them (which is the last one in the list. Always.)
print(' Only Goal')
wayCount += 1
col = np.argmax(np.array(eqn[1:])) + 1 #Get the position of the 1
temp = [myMatrix,myMatrix[row][0],myMatrix[0][col]] #[current myMatrix, eqn, var to solve for]
del myMatrix[row]
earlyPaths.update({'way'+str(wayCount):temp})
row -= 1
if sum(eqn[1:]) == 1:
print(' =1')
wayCount += 1
col = np.argmax(np.array(eqn[1:])) + 1 #Get the position of the 1
temp = [-1,myMatrix[row][0],myMatrix[0][col]] #[current myMatrix, eqn, var to solve for]
myMatrixTemp = copy.deepcopy(myMatrix)
del myMatrixTemp[row] #Delete the row
temp[0] = myMatrixTemp
for i in range(len(myMatrix)): #Delete the column
del myMatrix[i][col]
paths.update({'way'+str(wayCount):temp})
row -= 1
row, passedAlready = 0, False
print(stop)
for k in range(len(unknown)-1):#Fourth. This loop will end just before only the goal var remains
for item in paths.items():
myMatrix = item[1][0][:]
for eqn in myMatrix[1:]: #Find the equations with 1 unknown.
row += 1
if sum(eqn[1:]) == 1:
col = np.argmax(np.array(eqn[1:])) + 1 #Get the position of the 1
temp[1].append(myMatrix[row][0]) #Add eqn
temp[2].append(myMatrix[0][col]) #Add var
del myMatrix[row] #Delete the row
for i in range(len(myMatrix)): #Delete the column
del myMatrix[i][col]
temp[0] = myMatrix #Update current myMatrix
row -= 1
if passedAlready == False:
paths.update({item[0]:temp})
else: #There is another way that branches off of this one.
wayCount += 1
paths.update({'way'+str(wayCount):temp})
passedAlready = True
print(myMatrix)
print('\n','early',earlyPaths)
print('\n','paths',paths)
print(stop)
#{way1: [[eqnsLeft],[[varAvailable],[varToSolveFor]],[eqnsPathway]], way2: ...
#eqnsPathway: [[eqn1,var1],[eqn2,var2],...
def solver(self,eqns):
"""
This takes all the equations given to it and does either 'Linear Solve' or 'Gauss-Seidel' until it finds an answer.
If it diverges during Gauss-Seidel, it re-arranges the equations using sympy.
It saves all the variables to the class function.
If there are multiple ways to solve the problem, it chooses one at random, and if that doesn't work it deletes it.
It then tries the next way. Theoretically, any way that is provided should work. This is just a precaution.
~~~ A future update could handle requests for alternative ways to solve it.
Note: This does not handle Gauss-Seidel equations yet.
~~~ For the goal, it would be nice if it returned (1) a rounded answer, and (2) in the units your goal is in.
"""
print('Solving')
print(self.equations)
for item in self.equations:
answer = self.ridder(self,item[0]+'Eqn',item[1])
setattr(self,item[1],answer[0])
if item != self.equations[-1]:
print('I used equation ',item[0],' and solved for ',item[1],'. The answer was ',answer[0],' with a percent error of ',answer[1])
else:
print('I used equation ',item[0],' and solved for ',item[1],', your goal. The answer was ',answer[0],' with a percent error of ',answer[1])
print('Thank You for using our program.')
def f(self,fn):
"""
This function simply runs a function and returns the answer.
"""
if self.subject == 'thermo':
from .logicThermoEquations import LogicThermoEquations #Local import; the one in __init__ does not leave that method's scope
return getattr(LogicThermoEquations, fn)(self)
# elif self.subject == 'statics':
# return getattr(LogicStaticsEquations, fn)(self)
def ridder(self,eq,var,guess=[-10*10**90,10*10**90],erdes=0.00001):
"""
Solves one equation for one unknown using Ridder's method.
'eq' is the equation to be solved.
'var' is the variable that is being solved for.
'guess' is the bounds which the answer is in.
'erdes' is the desired error
"""
x1, x2, erdes = guess[0], guess[1], erdes/100
n = math.ceil(math.log(abs(x1-x2)/erdes)/math.log(2))
for i in range(int(n)):
setattr(self,var,x1)
f1 = self.f(self,eq)
setattr(self,var,x2)
f2 = self.f(self,eq)
x3 = (x1+x2)/2
setattr(self,var,x3)
f3 = self.f(self,eq)
x4 = x3+(x3-x1)*np.sign(f1-f2)*f3/math.sqrt(f3**2-f1*f2)
setattr(self,var,x4)
f4 = self.f(self,eq)
if f3*f4<0: x1,x2 = x3,x4
elif f1*f4<0: x2 = x4
elif f2*f4<0: x1 = x4
else: break
error = abs((x1-x2)/x2)*100
return x4,error
def gauss(self,eqns,erdes=0.00001):
"""
'eqns' is the equation names. [eq1,eq2,eq3]. They are solved in that order.
'erdes' is the desired error.
Example Input: gauss([f1,f2],0.01)
"""
noFun = len(eqns)
var = [3.14688]*noFun
varNew = var[:]
error = [10000]*noFun
varDif=[3.14688]*noFun
nHistory =[]
count = 0
while max(error) > erdes:
for i in range(noFun): varNew[i] = f(eqns[i],var) #solve each function
for i in range(noFun): error[i],varDif[i] = abs((varNew[i]-var[i])/varNew[i]),(abs(varNew[i]-var[i])/2)
for i in range(noFun):
if varDif[i]==max(varDif):
n=i
nHistory.append(varNew[n])
count,var = count + 1,copy.deepcopy(varNew)
if count == 10: #This Must always be an even Number. #It hasn't begun to converge within 10 iterations. So, Is it diverging?
var = [3.14688]*noFun
varNew = var[:]
halfLength = len(nHistory)/2
firstHalf = 0
secondHalf = 0
for i in range(int(halfLength)):
firstHalf = firstHalf + nHistory[i]
secondHalf = secondHalf + nHistory[i+int(halfLength)]
half1 = firstHalf/halfLength
half2 = secondHalf/halfLength
if abs(half1) < abs(half2):
print('This function Diverges. Re-do.')
sys.exit(0)
# else:
# print('It converges. I shall continue.')
return varNew,error
```
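The `ridder` method above applies Ridder's root-finding update, x4 = x3 + (x3 - x1)*sign(f1 - f2)*f3/sqrt(f3**2 - f1*f2), to whatever equation residual it is handed. For reference, a self-contained sketch of the same update on an ordinary function (the test function and tolerances are illustrative, not taken from the project):

```python
import math

def ridder_root(f, x1, x2, tol=1e-8, max_iter=60):
    """Find a root of f bracketed by [x1, x2] using Ridder's method."""
    f1, f2 = f(x1), f(x2)
    if f1 * f2 > 0:
        raise ValueError("Root is not bracketed")
    x4 = x1
    for _ in range(max_iter):
        x3 = 0.5 * (x1 + x2)
        f3 = f(x3)
        # Same update used in LogicCalculator.ridder
        x4 = x3 + (x3 - x1) * math.copysign(1.0, f1 - f2) * f3 / math.sqrt(f3**2 - f1 * f2)
        f4 = f(x4)
        if abs(x2 - x1) < tol or f4 == 0:
            return x4
        # Re-bracket around the new estimate
        if f3 * f4 < 0:
            x1, f1, x2, f2 = x3, f3, x4, f4
        elif f1 * f4 < 0:
            x2, f2 = x4, f4
        else:
            x1, f1 = x4, f4
    return x4

print(ridder_root(lambda x: x**2 - 2, 0.0, 2.0))  # ~1.4142135
```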
#### File: source/logic/logicThermoEquations.py
```python
import math
from scipy import integrate as integral #Assumed import; wbIntEqn() below calls integral.quad
class LogicThermoEquations:
""" This regulates all the Thermo Equations."""
def constants():
#All units must be previously changed to SI metric
Ru = 8.31447 #kJ/kmol*K Universal Gas Constant
g = 9.81 #m/s2 Standard Acceleration of Gravity
Patm = 101.325 #kPa Standard Atmospheric Pressure
sigmaSB = 5.6704*10**-8 #W/m2*K4 Stefan-Boltzmann Constant
kSB = 1.380650*10**-23 #J/K Boltzmann's Constant
c0 = 2.9979*10**8 #m/s Speed of Light in a Vacuum
c = 331.36 #m/s Speed of Sound in Dry Air
hIF = 333.7 #kJ/kg Heat of Fusion of Water
hFG = 2256.5 #kJ/kg Enthalpy of Vaporization of Water
return ({'Ru':Ru,'g':g,'Patm':Patm,'sigmaSB':sigmaSB,'kSB':kSB,'c0':c0,'c':c,'hIF':hIF,'hFG':hFG})
def eqnDatabase(self):
#A dictionary containing what each equation involves.
#This will be used to plan an equation course.
return {
'roeVA1': ['mdot1','roe','Velo1','A1'], 'roeVdot1': ['mdot1','roe','Vdot1'],
'vRoe1': ['v1','roe'], 'hupv1': ['hi','u1','P1','v1'], 'pvrt1': ['P1','v1','R','T1'],
'pvmrt1': ['P1','V1','m1','R','T1'],
'roeVA2': ['mdot2','roe','Velo2','A2'], 'roeVdot2': ['mdot2','roe','Vdot2'],
'vRoe2': ['v2','roe'], 'hupv2': ['he','u2','P2','v2'], 'pvrt2': ['P2','v2','R','T2'],
'pvmrt2': ['P2','V2','m2','R','T2'],
'Tconst': ['T1','T2'], 'Pconst': ['P1','P2'], 'hconst': ['hi','he'], 'uconst':['u1','u2'], 'vconst':['v1','v2'],
'Vconst': ['V1','V2'], 'mconst': ['m1','m2'], 'kcpcv': ['k','Cp','Cv'],
'cpcvr': ['Cp','Cv','R'], 'deltaU': ['u2','u1','Cv','T2','T1'],
'deltaH': ['he','hi','Cp','T2','T1'], 'deltaHIncom1': ['he','hi','Cp','v1','T2','T1','P2','P1'],
'wbIntEqn': ['Wb','P1','V2','V1'], 'wbDeltaVEqn': ['Wb','P1','V2','V1'],
'wbn': ['Wb','P2','P1','V2','V1','k'], 'wbnmr': ['Wb','T2','T1','m1','R','k'],
'wbnm': ['Wb','P2','P1','v2','v1','m1','R','k'], 'wbn1v': ['Wb','P1','V2','V1'],
'wbn1p': ['Wb','P2','P1','V1'], 'wbn1mrv': ['Wb','V2','V1','m1','R','T1'],
'wbn1mrp': ['Wb','P2','P1','m1','R','T1'],
'wbTotal': ['W','Wb','We','Ws'],
'we': [], 'ws': [],
'EnergyBalance': ['Q','W','mi','me','hi','he','ki_v','ke_v','pi_h','pe_h','m2','m1','u2','u1','k2_v','k1_v','p2_h','p1_h'],
'effThWQh': ['effTh','W','Qh'], 'effThQhQl': ['effTh','Qh','Ql'],
'effThWQhQl': ['W','Qh','Ql'], 'copHpWQh': ['copHp','Qh','W'],
'copRefWQh': ['copRef','Ql','W'], 'copRefQhQl': ['copRef','Qh','Ql'],
'copRefWQhQl': ['W','Qh','Ql'], 'copHpRef': ['copHp','copRef']
}
def roeVA1Eqn(self):
"""mdot=roe*Velo*A"""
return self.roe1*self.Velo1*self.A1-self.mdot1
def roeVdot1Eqn(self):
"""mdot=roe*Vdot"""
return self.roe1*self.Vdot1-self.mdot1
def vRoe1Eqn(self):
"""roe = 1/v"""
return 1/self.v1-self.roe1
def hupv1Eqn(self):
"""h=u+P*v"""
return self.u1+self.P1*self.v1-self.h1
##Ideal Gas Equations
def pvrt1Eqn(self):
"""Pv=RT"""
return self.P1*self.v1-self.R1*self.T1
def pvmrt1Eqn(self):
"""PV=mRT"""
return self.P1*self.V1-self.m1*self.R1*self.T1
def roeVA2Eqn(self):
"""mdot=roe*Velo*A"""
return self.roe2*self.Velo2*self.A2-self.mdot2
def roeVdot2Eqn(self):
"""mdot=roe*Vdot"""
return self.roe2*self.Vdot2-self.mdot2
def vRoe2Eqn(self):
"""roe = 1/v"""
return 1/self.v2-self.roe2
def hupv2Eqn(self):
"""h=u+P*v"""
return self.u2+self.P2*self.v2-self.h2
##Ideal Gas Equations
def pvrt2Eqn(self):
"""Pv=RT"""
return self.P2*self.v2-self.R2*self.T2
def pvmrt2Eqn(self):
"""PV=mRT"""
return self.P2*self.V2-self.m2*self.R2*self.T2
def TconstEqn(self):
"""T1=T2"""
return self.T1-self.T2
def PconstEqn(self):
"""P1=P2"""
return self.P1-self.P2
def hconstEqn(self):
"""he=hi"""
return self.he-self.hi
def uconstEqn(self):
"""u1=u2"""
return self.u1-self.u2
def vconstEqn(self):
"""v1=v2"""
return self.v1-self.v2
def VconstEqn(self):
"""V1=V2"""
return self.V1-self.V2
def mconstEqn(self):
"""m1=m2"""
return self.m1-self.m2
def kcpcvEqn(self):
"""k=Cp/Cv"""
return self.k-self.Cp/self.Cv
def cpcvrEqn(self):
"""Cp=Cv+R"""
return self.Cv+self.R-self.Cp
def deltaUEqn(self):
"""u2-u1=Cv*(T2-T1) if Cv is constant"""
return self.Cv*(self.T2-self.T1)-(self.u2-self.u1)
def deltaHEqn(self):
"""h2-h1=Cp*(T2-T1) if Cp is constant"""
return self.Cp*(self.T2-self.T1)-(self.h2-self.h1)
##Incompressible Equations
def deltaHIncomEqn(self):
"""h2-h1=C*(T2-T1)+v*(P2-P1)"""
return self.Cp*(self.T2-self.T1)+self.v1*(self.P2-self.P1)-(self.h2-self.h1)
##Boundary Work Equations
def wbIntEqn(self):
"""Wb=Integral(P*dV,V1,V2)"""
ans,err = integral.quad(lambda V: self.P1, self.V1, self.V2) #quad needs a callable; P is treated as constant here
return ans-self.Wb
def wbDeltaVEqn(self):
"""Wb=P*(V2-V1)"""
return self.P1*(self.V2-self.V1)-self.Wb
###Ideal Gas Boundary Work Equations
def wbnEqn(self):
"""Wb=(P2*V2-P1*V1)/(1-n)"""
return (self.P2*self.V2-self.P1*self.V1)/(1-self.k)-self.Wb
def wbnmrEqn(self):
"""Wb=m*R*(T2-T1)/(1-n)"""
return self.m1*self.R*(self.T2-self.T1)/(1-self.k)-self.Wb
def wbnmEqn(self):
"""Wb = m*(P2*v2-P1*v1)/(1-n)"""
return self.m1*(self.P2*self.v2-self.P1*self.v1)/(1-self.k)-self.Wb
def wbn1vEqn(self):
"""Wb=P1*V1*ln(V2/V1)"""
return self.P1*self.V1*math.log(self.V2/self.V1)-self.Wb
def wbn1pEqn(self):
"""Wb=P1*V1*ln(P1/P2)"""
return self.P1*self.V1*math.log(self.P1/self.P2)-self.Wb
def wbn1mrvEqn(self):
"""Wb = m*R*T*ln(V2/V1)"""
return self.m1*self.R*self.T1*math.log(self.V2/self.V1)-self.Wb
def wbn1mrpEqn(self):
"""Wb = m*R*T*ln(P1/P2)"""
return self.m1*self.R*self.T1*math.log(self.P1/self.P2)-self.Wb
def wbTotalEqn(self): #Named to match the 'wbTotal' key in eqnDatabase
"""W = Wb+Ws+We"""
return self.Wb+self.Ws+self.We-self.W
def weEqn(self):
"""Not in Yet"""
pass
def wsEqn(self):
"""Not in Yet"""
pass
##Energy Balance
def EnergyBalanceEqn(self):
"""Q-W+mi*(h+ke+pe)i-me*(h+ke+pe)e=m2*(u+ke+pe)2-m1*(u+ke+pe)1"""
return self.Q-self.W+self.mi*(self.hi+self.ki+self.pi)-self.me*(self.he+self.ke+self.pe)-(self.m2*(self.u2+self.k2+self.p2)-self.m1*(self.u1+self.k1+self.p1))
##Efficency
def effThWQhEqn(self):
"""effTh = W/Qh"""
return self.W/self.Qh-self.effTh
def effThQhQlEqn(self):
"""effTh = (Qh-Ql)/Qh"""
return (self.Qh-self.Ql)/self.Qh-self.effTh
def effThWQhQlEqn(self):
"""(Qh-Ql)/Qh = W/Qh"""
return (self.Qh-self.Ql)/self.Qh-self.W/self.Qh
def copHpWQhEqn(self):
"""copHp = Qh/W"""
return self.Qh/self.W-self.copHp
def copHpQhQlEqn(self):
"""copHp = 1/(1-Ql/Qh)"""
return 1/(1-self.Ql/self.Qh)-self.copHp
def copHpWQhQlEqn(self):
"""Qh/W = 1/(1-Ql/Qh)"""
return 1/(1-self.Ql/self.Qh)-self.Qh/self.W
def copRefWQhEqn(self):
"""copRef = Ql/W"""
return self.Ql/self.W-self.copRef
def copRefQhQlEqn(self):
"""copRef = 1/(Qh/Ql-1)"""
return 1/(self.Qh/self.Ql-1)-self.copRef
def copRefWQhQlEqn(self):
"""Ql/W = 1/(Qh/Ql-1)"""
return 1/(self.Qh/self.Ql-1)-self.Ql/self.W
def copHpRefEqn(self):
"""copHp=copRef+1"""
return self.copRef+1-self.copHp
```
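Every *Eqn method above returns a residual that is zero when its relation holds, which is what lets logicCalculator hand any of them to a root finder. A rough standalone illustration of that residual pattern, using made-up numbers and a plain bisection loop rather than the project's solver:

```python
# Residual form of the ideal-gas relation Pv = RT: zero when it is satisfied.
def pvrt_residual(T, P=130.0, v=1.0, R=0.287):
    return P * v - R * T

# Bisecting on the residual recovers T = P*v/R (about 452.96 for these numbers).
lo, hi = 1.0, 2000.0
for _ in range(60):
    mid = 0.5 * (lo + hi)
    if pvrt_residual(lo) * pvrt_residual(mid) <= 0:
        hi = mid
    else:
        lo = mid
print(round(mid, 2))  # 452.96
```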
#### File: source/views/frmThermoTableLookup.py
```python
import wx
class Frm_ThermoTableLookup ( wx.Frame ):
def __init__( self, parent ):
self.input = [-1,[-1,-1,[[-1,-1],[-1,-1]]]]
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = wx.EmptyString, pos = wx.DefaultPosition, size = wx.Size( 435,209 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
sz_TT_Main = wx.FlexGridSizer( 0, 1, 0, 0 )
sz_TT_Main.SetFlexibleDirection( wx.BOTH )
sz_TT_Main.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
sz_TT_Medium = wx.FlexGridSizer( 0, 4, 0, 0 )
sz_TT_Medium.SetFlexibleDirection( wx.BOTH )
sz_TT_Medium.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.txt_TT_Title = wx.StaticText( self, wx.ID_ANY, u"Thermo Lookup", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TT_Title.Wrap( -1 )
self.txt_TT_Title.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), 70, 90, 92, False, wx.EmptyString ) )
sz_TT_Medium.Add( self.txt_TT_Title, 0, wx.ALL, 5 )
self.txt_TT_Spacer = wx.StaticText( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TT_Spacer.Wrap( -1 )
sz_TT_Medium.Add( self.txt_TT_Spacer, 0, wx.ALL, 5 )
self.txt_TT_MediumInput = wx.StaticText( self, wx.ID_ANY, u"Medium", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TT_MediumInput.Wrap( -1 )
sz_TT_Medium.Add( self.txt_TT_MediumInput, 0, wx.ALL, 5 )
choice_TT_MediumChoices = ['','Water','R134a','Air','Ammonia','Argon','Benzene','Bromine','n-Butane','Carbon dioxide','Carbon monoxide','Carbon tetrachloride','Chlorine','Chloroform','R12','R21','Ethane','Ethyl alcohol','Ethylene','Helium','n-Hexane','Hydrogen','Krypton','Methane','Methyl alcohol','Methyl chloride','Neon','Nitrogen','Nitrous oxide','Oxygen','Propane','Propylene','Sulfur dioxide','R11','Xenon']
self.choice_TT_Medium = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, choice_TT_MediumChoices, 0 )
self.choice_TT_Medium.SetSelection( 0 )
sz_TT_Medium.Add( self.choice_TT_Medium, 0, wx.ALL, 5 )
self.txt_TT_Goal_ColOf = wx.StaticText( self, wx.ID_ANY, u"I want to know", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TT_Goal_ColOf.Wrap( -1 )
sz_TT_Medium.Add( self.txt_TT_Goal_ColOf, 0, wx.ALL, 5 )
medium, choices = [],[]
self.choice_TT_Goal_ColOf = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, choices, 0 )
self.choice_TT_Goal_ColOf.SetSelection( 1 )
sz_TT_Medium.Add( self.choice_TT_Goal_ColOf, 0, wx.ALL, 5 )
self.txt_TT_Goal_RowOf = wx.StaticText( self, wx.ID_ANY, u"of", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TT_Goal_RowOf.Wrap( -1 )
sz_TT_Medium.Add( self.txt_TT_Goal_RowOf, 0, wx.ALL, 5 )
mediumValue = self.choice_TT_Medium.GetString(self.choice_TT_Medium.GetSelection())
self.txt_TT_Goal_RowOf = wx.StaticText( self, wx.ID_ANY, mediumValue, wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TT_Goal_RowOf.Wrap( -1 )
sz_TT_Medium.Add( self.txt_TT_Goal_RowOf, 0, wx.ALL, 5 )
self.txt_TT_Ref_ColOf = wx.StaticText( self, wx.ID_ANY, u"Using a", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TT_Ref_ColOf.Wrap( -1 )
sz_TT_Medium.Add( self.txt_TT_Ref_ColOf, 0, wx.ALL, 5 )
self.choice_TT_Ref_ColOf = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, choices, 0 )
self.choice_TT_Ref_ColOf.SetSelection( 1 )
sz_TT_Medium.Add( self.choice_TT_Ref_ColOf, 0, wx.ALL, 5 )
self.txt_TT_Ref_RowOf = wx.StaticText( self, wx.ID_ANY, u"of", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TT_Ref_RowOf.Wrap( -1 )
sz_TT_Medium.Add( self.txt_TT_Ref_RowOf, 0, wx.ALL, 5 )
self.txtCtrl_TT_Ref_RowOf = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TT_Medium.Add( self.txtCtrl_TT_Ref_RowOf, 0, wx.ALL, 5 )
self.txt_TT_Ref_ColOf = wx.StaticText( self, wx.ID_ANY, u"And a", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TT_Ref_ColOf.Wrap( -1 )
sz_TT_Medium.Add( self.txt_TT_Ref_ColOf, 0, wx.ALL, 5 )
self.choice_TT_Ref2_ColOf = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, choices, 0 )
self.choice_TT_Ref2_ColOf.SetSelection( 1 )
sz_TT_Medium.Add( self.choice_TT_Ref2_ColOf, 0, wx.ALL, 5 )
self.txt_TT_Ref2_RowOf = wx.StaticText( self, wx.ID_ANY, u"of", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TT_Ref2_RowOf.Wrap( -1 )
sz_TT_Medium.Add( self.txt_TT_Ref2_RowOf, 0, wx.ALL, 5 )
self.txtCtrl_TT_Ref2_RowOf = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TT_Medium.Add( self.txtCtrl_TT_Ref2_RowOf, 0, wx.ALL, 5 )
self.btn_TT_Go = wx.Button( self, wx.ID_ANY, u"Go!", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TT_Medium.Add( self.btn_TT_Go, 0, wx.ALL, 5 )
self.txt_TT_Spacer2 = wx.StaticText( self, wx.ID_ANY, u" ", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TT_Spacer2.Wrap( -1 )
sz_TT_Medium.Add( self.txt_TT_Spacer2, 0, wx.ALL, 5 )
self.txt_TT_Answer = wx.StaticText( self, wx.ID_ANY, u"Answer:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TT_Answer.Wrap( -1 )
sz_TT_Medium.Add( self.txt_TT_Answer, 0, wx.ALL, 5 )
self.txt_TT_AnswerValue = wx.StaticText( self, wx.ID_ANY, u" ", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TT_AnswerValue.Wrap( -1 )
sz_TT_Medium.Add( self.txt_TT_AnswerValue, 0, wx.ALL, 5 )
sz_TT_Main.Add( sz_TT_Medium, 1, wx.EXPAND, 5 )
self.SetSizer( sz_TT_Main )
self.Layout()
self.Centre( wx.BOTH )
# Connect Events
self.choice_TT_Medium.Bind( wx.EVT_CHOICE, self.onChoiceMedium )
self.choice_TT_Goal_ColOf.Bind( wx.EVT_CHOICE, self.onChoiceGoalColOf )
self.choice_TT_Ref_ColOf.Bind( wx.EVT_CHOICE, self.onChoiceRefColOf )
self.txtCtrl_TT_Ref_RowOf.Bind( wx.EVT_TEXT, self.onTxtRefRowOf )
self.choice_TT_Ref2_ColOf.Bind( wx.EVT_CHOICE, self.onChoiceRef2ColOf )
self.txtCtrl_TT_Ref2_RowOf.Bind( wx.EVT_TEXT, self.onTxtRef2RowOf )
self.btn_TT_Go.Bind( wx.EVT_BUTTON, self.onBtnClickGo )
def __del__( self ):
pass
``` |
{
"source": "JoshMayberry/Numerical_Methods",
"score": 4
} |
#### File: Numerical_Methods/CurveFitting/Curve_Fit.py
```python
import math
import numpy as np
import matplotlib.pyplot as plt
def cfit(x,y,filename,flag,fitType):
"""
'x' is the x-points
'y' is the y-points
'filename' is an external file location that can be loaded.
'flag' determines whether to use the points given (1) or load from an external file (2).
'fitType' is a string for the curve fit. It can be one of the following:
'line' - straight line curve fit
'log' - logarithmic curve fit
'power' - power law curve fit
'exp' - exponential curve fit
'second' - second order polynomial curve fit
'third' - third order polynomial curve fit
'fourth' - fourth order polynomial curve fit
'all' - all the curve fits listed above
'best' - first and second options for best curve fits
'temp' is a temporary variable that does various things throughout the program, being constantly overwritten.
Example Input: cfit(x,y,'data.txt',2,'all')
Example Input: cfit(np.array([0.5,0.9,1.7,2.4]),np.array([8.7,9.3,10.6,12.1]),'dummy',1,'line')
"""
temp = check(x,y,filename,flag,fitType)
x,y,fitType = temp[0],temp[1],temp[2]
if temp[3] == 1:
if (fitType == 'all') or (fitType =='best'):
temp = [(lineGenerator(x,y,'line'))]
temp.append(lineGenerator(x,y,'log'))
temp.append(lineGenerator(x,y,'power'))
temp.append(lineGenerator(x,y,'exp'))
temp.append(lineGenerator(x,y,'second'))
temp.append(lineGenerator(x,y,'third'))
temp.append(lineGenerator(x,y,'fourth'))
if fitType == 'best':
temp = optimizer(np.transpose(np.array(temp)).tolist())
else:
temp = np.transpose(np.array(temp)).tolist()
temp[1] = (np.array(temp[1]).flatten().tolist())
else:
temp = lineGenerator(x,y,fitType)
showplots(x,y,temp,fitType)
eqn = temp[1]
r2 = temp[0][1]
return eqn,r2
else: print('failed the check') #program ends
#The Black Box
def check(x,y,filename,flag,fitType):
"""This checks that the inputs will work.
'x' is the flagged x list to use.
'y' is the flagged y list to use.
'n' is whether the other checks failed or not.
Example Input: check(np.array([0.5,0.9,1.7,2.4]),np.array([8.7,9.3,10.6,12.1]),'dummy',1,'line')
"""
n = 1
if flag == 2:
data = np.loadtxt(filename)
x = data[:,0]
y = data[:,1]
if len(x) > len(y):
print('Your x-list is larger than your y-list.')
n = 0
elif len(y) > len(x):
print('Your y-list is larger than your x-list.')
n = 0
if type(x) != np.ndarray:
print("Your x-list is not an np.array. I'll fix that for you")
x = np.array(x)
if type(y) != np.ndarray:
print("Your y-list is not an np.array. I'll fix that for you")
y = np.array(y)
fitType = fitType.lower() #Gets rid of any accidental caps for you
if fitType not in ('line','log','power','exp','second','third','fourth','all','best'):
print('I do not understand the fitType you gave me. Please make sure you spelled it correctly.')
n = 0
return [x,y,fitType,n]
def lineGenerator(x,y,fitType):
"""This determines which type of trendline(s) should be computed."""
if (fitType == 'line') or (fitType == 'log') or (fitType == 'power') or (fitType == 'exp'):
solved = trendline(x,y,fitType)
eqn = equation(solved,fitType)
elif fitType == 'second':
solved = trendline(x,y,2)
eqn = equation(solved,fitType)
elif fitType == 'third':
solved = trendline(x,y,3)
eqn = equation(solved,fitType)
elif fitType == 'fourth':
solved = trendline(x,y,4)
eqn = equation(solved,fitType)
return [solved,[eqn]]
def optimizer(myList):
"""Finds the best two fits for the trendlines"""
eqns = np.transpose(np.array(myList[1])).tolist()
myList = np.transpose(np.array(myList[0])).tolist()
solvedMax,r2Max,eqnMax = [],[],[]
for i in range(2):
n = np.array(myList[1]).flatten().argmax()
solvedMax.append(myList[0][n])
r2Max.append(myList[1][n])
eqnMax.append(eqns[0][n])
myList[1][n] = [0]
myList[0][n] = [0]
return [[solvedMax,r2Max],eqnMax]
def trendline(x,y,fitType):
"""This creates various types of trendlines from lists of data in the form of numpy arrays.
It returns the a & b values as a list, as well as the r2 value.
Example Input: trendline(np.array([0.5,0.9,1.7,2.4]),np.array([8.7,9.3,10.6,12.1]),'line')
"""
if type(fitType)==str:
n = len(x)
if fitType == 'line':
sumx = np.sum(x)
sumy = np.sum(y)
sumxy = np.sum(x*y)
sumx2 = np.sum(x**2)
sumy2 = np.sum(y**2)
elif fitType == 'power':
sumx = np.sum(np.log(x))
sumy = np.sum(np.log(y))
sumxy = np.sum(np.log(x)*np.log(y))
sumx2 = np.sum(np.log(x)**2)
sumy2 = np.sum(np.log(y)**2)
elif fitType == 'exp':
sumx = np.sum(x)
sumy = np.sum(np.log(y))
sumxy = np.sum(x*np.log(y))
sumx2 = np.sum(x**2)
sumy2 = np.sum(np.log(y)**2)
elif fitType == 'log':
sumx = np.sum(np.log(x))
sumy = np.sum(y)
sumxy = np.sum(np.log(x)*y)
sumx2 = np.sum(np.log(x)**2)
sumy2 = np.sum(y**2)
A = np.array([[n,sumx],[sumx,sumx2]])
b = np.array([[sumy],[sumxy]])
solved = np.linalg.solve(A,b)
r2 = (solved[0,0]*sumy+solved[1,0]*sumxy-1/n*sumy**2)/(sumy2-1/n*sumy**2)
if fitType == 'power': solved[0,0] = np.exp(solved[0,0])
elif fitType == 'exp': solved[0,0] = np.exp(solved[0,0])
else:
#The fitType is the order that the polynomial is.
sumxList,sumyList,b,r2 = [],[],[],0
n = len(x)
A = [[n]]
for i in range(fitType*2): #Create a list that ranges from x^1 to x^n
sumxList.append(np.sum(x**(i+1)))
sumyList.append(np.sum(x**(i)*y))
for i in range(fitType): #Initialize the A
A.append([sumxList[i]])
for j in range(fitType+1):#Set up the A and b
for i in range(fitType):
A[j].append(sumxList[i+j])
b.append(sumyList[j])
A = np.array(A)
b = np.array(b).transpose()
solved = np.linalg.solve(A,b)
for i in range(fitType+1):
r2 += solved[i]*sumyList[i]
r2 = (r2-sumyList[0]**2/n)/(np.sum(y**2)-1/n*sumyList[0]**2)
return [solved.tolist(),[r2]]
def equation(solved,fitType):
"""This creates an equation for the trendline
Example Input: equation([[[7.7307334109429595], [1.7776484284051208]], 0.99394696088416035],'line')"""
if fitType == 'line': eqn = ('y='+str(solved[0][0][0])+'+'+str(solved[0][1][0])+'*x')
elif fitType == 'power': eqn = ('y='+str(solved[0][0][0])+'*x**('+str(solved[0][1][0])+')')
elif fitType == 'exp': eqn = ('y='+str(solved[0][0][0])+'*np.exp('+str(solved[0][1][0])+'*x)')
elif fitType == 'log': eqn = ('y='+str(solved[0][0][0])+'+'+str(solved[0][1][0])+'*np.log(x)')
else:
if fitType == 'second': n = 2
elif fitType == 'third': n = 3
elif fitType == 'fourth': n = 4
eqn = 'y='+str(solved[0][0])+'+'+str(solved[0][1])+'*x'
for i in range(n-1): eqn += '+'+str(solved[0][i+2])+'*x**'+str(i+2)
return eqn
def showplots(xlist,ylist,answer,fitType):
"""This creates a dynamic plot(s) for the trendlines.
Example Input: showplots(x,y,temp,fitType)
"""
myLegends = ['given']
h = 50 #step scalar
x = np.arange(xlist.min(),xlist.max(),(xlist.max()-xlist.min())/h)
plt.figure(1,figsize=(8,4))
plt.plot(xlist,ylist,'ko')
for j in range(len(answer[1])):
if j<7:
if j%2 == 0:
if j == 0: lnStl = 'Purple'
elif j == 2: lnStl = 'OrangeRed'
elif j == 4: lnStl = 'y-'
elif j == 6: lnStl = 'b-'
else:
if j == 1: lnStl = 'r-'
elif j == 3: lnStl = 'Orange'
elif j == 5: lnStl = 'g-'
else: lnStl = 'w--'
if fitType == 'best':
plt.figure(2,figsize=(16,8))
plt.subplot(2,1,j+1)
if 'e**' in answer[1][j]: myLegends.append('exp')
elif 'log' in answer[1][j]: myLegends.append('log')
elif '**4' in answer[1][j]: myLegends.append('fourth')
elif '**3' in answer[1][j]: myLegends.append('third')
elif '**2' in answer[1][j]: myLegends.append('second')
elif '**' in answer[1][j]: myLegends.append('power')
else: myLegends.append('line')
elif fitType == 'all':
plt.figure(2,figsize=(16,8))
plt.subplot(3,3,j+1)
if j == 0: myLegends.append('line')
elif j == 1: myLegends.append('log')
elif j == 2: myLegends.append('power')
elif j == 3: myLegends.append('exp')
elif j == 4: myLegends.append('second')
elif j == 5: myLegends.append('third')
elif j == 6: myLegends.append('fourth')
else: myLegends.append(fitType)
plt.plot(xlist,ylist,'ko')
plt.plot(x,eval(answer[1][j].strip('y=')),lnStl)
plt.figure(1)
plt.plot(x,eval(answer[1][j].strip('y=')),lnStl)
plt.figure(1)
plt.legend(myLegends)
if fitType == 'best':
plt.figure(2)
for j in range(len(answer[1])):
plt.subplot(2,1,j+1)
plt.legend(['given',myLegends[j+1]])
elif fitType == 'all':
plt.figure(2)
for j in range(len(answer[1])):
plt.subplot(3,3,j+1)
plt.legend(['given',myLegends[j+1]])
if fitType in ('all','best'):
plt.figure(1).canvas.set_window_title('All Lines')
plt.figure(2).canvas.set_window_title('Individual Lines')
else: plt.figure(1).canvas.set_window_title('Curve Fit')
plt.show()
```
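A minimal usage sketch for `cfit` above, assuming the module is saved as `Curve_Fit.py` (per the file header) and that NumPy and Matplotlib are installed; the data points and the 'line' fit type come from the docstring's own examples.
```python
import numpy as np
from Curve_Fit import cfit  # assumed module name, taken from the file header above

# Pass the points directly (flag = 1), so the filename argument is ignored
x = np.array([0.5, 0.9, 1.7, 2.4])
y = np.array([8.7, 9.3, 10.6, 12.1])

# Returns the trendline equation (as an evaluable string) and its r^2 value,
# and pops up the matplotlib figure drawn by showplots()
eqn, r2 = cfit(x, y, 'dummy', 1, 'line')
print(eqn, r2)
```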
#### File: Numerical_Methods/GaussSidel/gauss 6.py
```python
import numpy as np
import equations as eq
import math
import sys
#def f1(xlist):
#"""Solves x = 2y+z-s+sqrt(t)"""
#return 2*x[1]+x[2]-x[3]+math.sqrt(x[4])
def f1(myList):
"""Solves x = Sqrt((120-y)/8)"""
return math.sqrt((120-myList[1])/8)
#return 120-8*myList[0]**2
def f2(myList):
"""Solves y = x^3-1"""
return myList[0]**3-1
#return (myList[1]+1)**(1/3)
#def f3(myList):
#	pass #Placeholder for a third equation
def f(fn,var):
return fn(var)
def gauss(eqns,erdes=0.025):
"""
'eqns' is the equation names. [eq1,eq2,eq3].
'erdes' is the desired error.
Example Input: gauss([f1,f2],0.01)
"""
noFun = len(eqns)
var = [3.14688]*noFun #[x,y,z]
varNew = var[:]
error = [10000]*noFun
varDif=[3.14688]*noFun
nHistory =[]
count = 0
while max(error) > erdes:
for i in range(noFun):
#solve each function
varNew[i] = f(eqns[i],var)
for i in range(noFun):
error[i] = abs((varNew[i]-var[i])/varNew[i])
varDif[i]=(abs(varNew[i]-var[i])/2)
#print('i:',i,'varNew[i]:',varNew[i],'var[i]:',var[i])
#print('varDif:',varDif)
for i in range(noFun):
if varDif[i]==max(varDif):
n=i
nHistory.append(varNew[n])
#print('n:',n)
#print('count:',count,'error:',error)
count += 1
var = varNew[:]
if count == 6: #This must always be an even number. #It hasn't converged within 6 iterations. So, is it diverging?
var = [3.14688]*noFun #[x,y,z]
varNew = var[:]
halfLength = len(nHistory)/2
firstHalf = 0
secondHalf = 0
for i in range(int(halfLength)):
firstHalf = firstHalf + nHistory[i]
secondHalf = secondHalf + nHistory[i+int(halfLength)]
half1 = firstHalf/halfLength
half2 = secondHalf/halfLength
if abs(half1) < abs(half2):
print('This function Diverges. Re-do.')
sys.exit(0)
else:
print('It converges. I shall continue.')
return varNew,count,error
```
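A minimal driver for the solver above, lifted from its docstring example and assuming it is appended to the same module as f1 and f2 (the file name contains a space, so it is not directly importable); note that with the equation forms currently active the routine may detect divergence, print a message, and call sys.exit().
```python
if __name__ == '__main__':
    # Solve the coupled pair x = sqrt((120-y)/8) and y = x^3 - 1 to within 1% relative error
    varNew, count, error = gauss([f1, f2], 0.01)
    print('solution:', varNew)
    print('iterations:', count, 'relative errors:', error)
```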
#### File: Numerical_Methods/GaussSidel/gauss.py
```python
import numpy as np
import equations as eq
import math
def f(fn,val):
fx = fn(val)
return fx
def gauss(eqns,mylist,erdes=0.1):
"""
'eqns' is the equation names. [eq1,eq2,eq3].
'mylist' is [x,y,z]. If you want to solve for x, y and z must be constant.
'erdes' is the desired error.
Example Input: gauss([eq.eq6,eq.eq7],[[Left Bound,Right Bound],yConstant,zConstant],0.01)
"""
noFun = len(eqns)
for i in range(len(mylist)):
if type(mylist[i]) == list:
bound1 = mylist[i][0]
bound2 = mylist[i][1]
break
elif i == len(mylist)-1:
print('You missed the wombat. The rebel alliance has died. Shoot again, for the wombat has escaped.')
error = 10000
while error > erdes:
for i in range(noFun):
#solve each function
f(eqns[i],mylist)
error = [abs((xnew-xo)/xnew),abs((ynew-yo)/ynew),abs((znew-zo)/znew)]
#error to compare: max(error) -- this draft never computes the new values; see 'gauss 6.py' for the finished update step
#check for divergence
```
#### File: Numerical_Methods/RungeKutta/runge kutta 2.py
```python
import math
def slope(t,y):
"""dy/dt=y*sin^3(t)"""
global count
count +=1
return y*math.sin(t)**3
def rk(fn,t,y,tfinal,h=1):
global count
count = 0
for i in range(tfinal):
#print('t',t,'y',y)
k1 = fn(t,y)
k2 = fn(t+h/2,y+h/2*k1)
k3 = fn(t+h/2,y+h/2*k2)
k4 = fn(t+h,y+h*k3)
t += h
y += h/6*(k1+2*k2+2*k3+k4)
#print('k',k1,k2,k3,k4,'\n')
return (y,t,count)
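#A quick self-test of the fourth-order Runge-Kutta routine above; the initial condition
#y(0) = 1 and the horizon t = 10 are illustrative only
if __name__ == '__main__':
	y_final, t_final, evaluations = rk(slope, 0, 1.0, 10)
	print('y =', y_final, 'at t =', t_final, 'after', evaluations, 'slope evaluations')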
``` |
{
"source": "JoshMayberry/Security_API",
"score": 3
} |
#### File: JoshMayberry/Security_API/controller.py
```python
__version__ = "1.0.0"
#Import standard elements
import warnings
#Import cryptodome to encrypt and decrypt files
import Cryptodome.Random
import Cryptodome.Cipher.AES
import Cryptodome.PublicKey.RSA
import Cryptodome.Cipher.PKCS1_OAEP
#Required Modules
##py -m pip install
# pycryptodomex
#Controllers
def build(*args, **kwargs):
"""Starts the GUI making process."""
return Security(*args, **kwargs)
class Security():
"""Allows the user to encrypt and decrypt files.
Adapted from: http://www.blog.pythonlibrary.org/2016/05/18/python-3-an-intro-to-encryption/
"""
def __init__(self):
"""Initializes defaults and internal variables."""
#Defaults
self.password = "<PASSWORD>"
#Internal Variables
self.missingPublicKey = True
self.missingPrivateKey = True
def setPassword(self, password):
"""Changes the encryption password.
password (str) - What the encryption password is
Example Input: setPassword("<PASSWORD>")
"""
self.password = password
def generateKeys(self, privateDir = "", publicDir = "", privateName = "privateKey", publicName = "publicKey", autoLoad = True):
"""Creates a private and public key.
privateDir (str) - The save directory for the private key
publicDir (str) - The save directory for the public key
privateName (str) - The name of the private key file
publicName (str) - The name of the public key file
autoLoad (bool) - Automatically loads the generated keys into memory
Example Input: generateKeys()
Example Input: generateKeys(autoLoad = False)
"""
#Create the key
key = Cryptodome.PublicKey.RSA.generate(2048)
encryptedKey = key.exportKey(passphrase = self.password, pkcs=8, protection = "scryptAndAES128-CBC")
#Save the key
with open(privateDir + privateName + ".pem", 'wb') as fileHandle:
fileHandle.write(encryptedKey)
with open(publicDir + publicName + ".pem", 'wb') as fileHandle:
fileHandle.write(key.publickey().exportKey())
#Load the key
if (autoLoad):
self.loadKeys(privateDir, publicDir, privateName, publicName)
def loadKeys(self, privateDir = "", publicDir = "", privateName = "privateKey", publicName = "publicKey"):
"""Creates a private and public key.
privateDir (str) - The save directory for the private key
publicDir (str) - The save directory for the public key
privateName (str) - The name of the private key file
publicName (str) - The name of the public key file
Example Input: loadKeys()
"""
self.loadPrivateKey(privateDir, privateName)
self.loadPublicKey(publicDir, publicName)
def loadPrivateKey(self, directory = "", name = "privateKey"):
"""Loads the private key into memory.
directory (str) - The save directory for the private key
name (str) - The name of the private key file
Example Input: loadPrivateKey()
"""
self.privateKey = Cryptodome.PublicKey.RSA.import_key(
open(directory + name + ".pem").read(), passphrase = self.password)
self.missingPrivateKey = False
def loadPublicKey(self, directory = "", name = "publicKey"):
"""Loads the public key into memory.
directory (str) - The save directory for the public key
name (str) - The name of the public key file
Example Input: loadPublicKey()
"""
self.publicKey = Cryptodome.PublicKey.RSA.import_key(
open(directory + name + ".pem").read())
self.missingPublicKey = False
def encryptData(self, data, directory = "", name = "encryptedData", extension = "db"):
"""Encrypts a string of data to a new file.
If a file by the same name already exists, it replaces the file.
data (str) - The string to encrypt and store
directory (str) - The save directory for the encrypted data
name (str) - The name of the encrypted data
extension (str) - The file extension for the encrypted data
Example Input: encryptData("Lorem Ipsum")
Example Input: encryptData("Lorem Ipsum", extension = "txt")
"""
#Check for keys
if (self.missingPublicKey or self.missingPrivateKey):
warnings.warn(f"Cannot encrypt data without keys for {self.__repr__()}\n Use 'loadKeys()' or 'loadPublicKey() and loadPrivateKey()' first", Warning, stacklevel = 2)
return None
#Format the output path
outputName = f"{directory}{name}.{extension}"
#Format the data
data = data.encode("utf-8")
#Create the file
with open(outputName, "wb") as outputFile:
sessionKey = Cryptodome.Random.get_random_bytes(16)
#Write the session key
cipherRSA = Cryptodome.Cipher.PKCS1_OAEP.new(self.publicKey)
outputFile.write(cipherRSA.encrypt(sessionKey))
#Write the data
cipherAES = Cryptodome.Cipher.AES.new(sessionKey, Cryptodome.Cipher.AES.MODE_EAX)
ciphertext, tag = cipherAES.encrypt_and_digest(data)
outputFile.write(cipherAES.nonce)
outputFile.write(tag)
outputFile.write(ciphertext)
def decryptData(self, directory = "", name = "encryptedData", extension = "db"):
"""Decrypts an encrypted file into a string of data
directory (str) - The save directory for the encrypted data
name (str) - The name of the encrypted data
extension (str) - The file extension for the encrypted data
Example Input: decryptData()
Example Input: decryptData(extension = "txt")
"""
#Check for keys
if (self.missingPublicKey or self.missingPrivateKey):
warnings.warn(f"Cannot decrypt data without keys for {self.__repr__()}\n Use 'loadKeys()' or 'loadPublicKey() and loadPrivateKey()' first", Warning, stacklevel = 2)
return None
#Format the output path
inputName = f"{directory}{name}.{extension}"
#Create the file
with open(inputName, "rb") as inputFile:
endSessionKey, nonce, tag, ciphertext = [ inputFile.read(x)
for x in (self.privateKey.size_in_bytes(), 16, 16, -1) ]
cipherRSA = Cryptodome.Cipher.PKCS1_OAEP.new(self.privateKey)
sessionKey = cipherRSA.decrypt(endSessionKey)
cipherAES = Cryptodome.Cipher.AES.new(sessionKey, Cryptodome.Cipher.AES.MODE_EAX, nonce)
data = cipherAES.decrypt_and_verify(ciphertext, tag)
#Format the output data
data = data.decode("utf-8")
return data
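if __name__ == '__main__':
	#A minimal end-to-end sketch (hypothetical password and message); assumes pycryptodomex is
	#installed and that writing privateKey.pem / publicKey.pem to the working directory is acceptable
	security = build()
	security.setPassword("hunter2")
	security.generateKeys()
	security.encryptData("Lorem Ipsum", extension = "txt")
	print(security.decryptData(extension = "txt"))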
``` |
{
"source": "JoshMayberry/SQLite-Database",
"score": 2
} |
#### File: JoshMayberry/SQLite-Database/db_config.py
```python
import os
import ast
import datetime
import configparser
import MyUtilities.common
NULL = MyUtilities.common.NULL
openPlus = MyUtilities.common.openPlus
#Monkey Patches
configparser.ConfigParser.optionxform = str
class Configuration(MyUtilities.common.EnsureFunctions, MyUtilities.common.CommonFunctions):
"""Used to handle .ini files.
- Both keys and values can have spaces
- Multi-line values must have extra lines indented one line deeper
- Sections and single-line values can be indented with no consequence
- Keys can be separated from values by either = or :
- Keys without values can have no separator
- The separator can have spaces on each side
- Comments can be done using # or ;
___________________ EXAMPLE INI FILE ___________________
[DEFAULT]
scanDelay = %(delay) %(units)
units = ms
[main]
startup_user = admin
[AutoSave]
delay = 1
units = s
[GUI]
delay = 500
________________________________________________________
Use: https://pymotw.com/3/configparser/
Use: https://docs.python.org/3.6/library/configparser.html
Use: https://martin-thoma.com/configuration-files-in-python/#configparser
Use: https://www.blog.pythonlibrary.org/2010/01/01/a-brief-configobj-tutorial/
use: https://www.blog.pythonlibrary.org/2013/10/25/python-101-an-intro-to-configparser/
"""
def __init__(self, default_filePath = None, *, default_values = None, default_section = None, forceExists = False, forceCondition = None,
allowNone = True, interpolation = True, valid_section = None, readOnly = False, defaultFileExtension = None, backup_filePath = None,
knownTypes = None, knownTypesSection = "knownTypes", knownTypeDefault = None, version = None):
"""
allowNone (bool) - Determines what happens if a setting does not have a set value
- If True: Will use None
- If False: Will raise an error during load()
interpolation (bool) - Determines what kind of interpolation can be done in get()
- If True: Extended Interpolation
- If False: Basic Interpolation
- If None: No Interpolation
valid_section (list) - Which sections (excluding DEFAULT) to load
- If str: Will load only that section
- If None: Will load all sections
~ Optionally, variables can be defined in the section given to 'knownTypesSection'
knownTypesSection (str) - Which section is used to store knownTypes
- If None: Will not use a section to get knownTypes from
version (str) - What version the config file must have
- If None: Will not do a version check
- If different: Will replace the config file with the one from *default_filePath*
Example Input: Configuration()
Example Input: Configuration("database/settings.ini")
Example Input: Configuration(default_values = {"startup_user": "admin"})
"""
self.defaultFileExtension = defaultFileExtension or "ini"
self.default_section = default_section or "main"
self.default_filePath = default_filePath or f"settings.{self.defaultFileExtension}"
self.backup_filePath = backup_filePath
self.version = version
if (interpolation):
interpolation = self.MyExtendedInterpolation()
elif (interpolation is not None):
interpolation = configparser.BasicInterpolation()
self.setReset(converters = self.converters, allow_no_value = allowNone,
defaults = default_values or {}, interpolation = interpolation)
self.reset()
# self.config.optionxform = str
self.knownTypeDefault = knownTypeDefault or "_default_"
self.knownTypesSection = knownTypesSection or None
self.knownTypes = knownTypes or {}
self.readOnly = readOnly
self.set_validSection(valid_section)
if (default_filePath):
self.load(forceExists = forceExists, forceCondition = forceCondition)
def setReset(self, *args, **kwargs):
self._reset = (args, kwargs)
def _eval(self, *args, **kwargs):
value = self.config.get(*args, **kwargs)
return ast.literal_eval(value)
def reset(self):
self.config = configparser.ConfigParser(*self._reset[0], **self._reset[1])
self.dataType_catalogue = {
None: self.config.get,
eval: self._eval, "eval": self._eval,
str: self.config.get, "str": self.config.get,
int: self.config.getint, "int": self.config.getint,
float: self.config.getfloat, "float": self.config.getfloat,
bool: self.config.getboolean, "bool": self.config.getboolean,
datetime.datetime: self.config.getdatetime, "datetime": self.config.getdatetime,
}
def __repr__(self):
representation = f"{type(self).__name__}(id = {id(self)})"
return representation
def __str__(self):
output = f"{type(self).__name__}()\n-- id: {id(self)}\n"
return output
def __enter__(self):
return self.config
def __exit__(self, exc_type, exc_value, traceback):
if (traceback is not None):
print(exc_type, exc_value)
return False
def __getitem__(self, key):
self.check_invalidSection(key)
return self.config[key]
def __setitem__(self, key, value):
if (self.readOnly):
raise ReadOnlyError(self)
self.check_invalidSection(key)
self.config[key] = value
def __delitem__(self, key):
if (self.readOnly):
raise ReadOnlyError(self)
self.check_invalidSection(key)
del self.config[key]
def __contains__(self, key):
if (self.check_invalidSection(key, raiseError = False)):
return False
return key in self.config
def keys(self):
if (self.valid_section is None):
return tuple(self.config.keys())
return tuple(section for section in self.config.keys() if (section in self.valid_section))
def values(self):
if (self.valid_section is None):
return tuple(self.config.values())
return tuple(handle for section, handle in self.config.items() if (section in self.valid_section))
def items(self):
if (self.valid_section is None):
return tuple(self.config.items())
return tuple((section, handle) for section, handle in self.config.items() if (section in self.valid_section))
def _asdict(self):
if (self.valid_section is None):
return dict(self.config)
return {key: value for key, value in self.items()}
def check_invalidSection(self, section, *, raiseError = True, valid_section = NULL):
if (valid_section is NULL):
valid_section = self.valid_section
if ((valid_section is not None) and (section not in valid_section) and (not self.has_section(section, valid_section = None))):
if (raiseError):
raise InvalidSectionError(self, section)
return True
def _getType(self, variable, section = None, *, dataType = None):
"""Returns what type to use for the given variable.
Example Input: _getType("delay")
"""
if (dataType is None):
section = section or self.default_section
check_section = False
if ((self.knownTypesSection is not None) and (self.knownTypesSection in self.config.sections())):
if (self.has_setting(variable, self.knownTypesSection)):
function = self.dataType_catalogue.get(self.config[self.knownTypesSection][variable], None)
if (function is not None):
return function
check_section = True
if ((section in self.knownTypes) and (variable in self.knownTypes[section])):
return self.dataType_catalogue[self.knownTypes[section][variable]]
default_section = self.config.default_section
if ((default_section in self.knownTypes) and (variable in self.knownTypes[default_section])):
return self.dataType_catalogue[self.knownTypes[default_section][variable]]
if (variable in self.knownTypes):
return self.dataType_catalogue[self.knownTypes[variable]]
if (check_section and self.has_setting(self.knownTypeDefault, self.knownTypesSection)):
function = self.dataType_catalogue.get(self.config[self.knownTypesSection][self.knownTypeDefault], None)
if (function is not None):
return function
return self.dataType_catalogue[dataType]
def get(self, variable = None, section = None, *, dataType = None, default_values = None, include_defaults = True,
fallback = configparser._UNSET, raw = False, forceSection = False, forceSetting = False, valid_section = NULL):
"""Returns a setting from the given section.
variable (str) - What setting to get
- If list: Will return a dictionary of all settings in the list
- If None: Will return a dictionary of all settings in the section
section (str) - What section to write this setting in
- If None: Will use the default section
dataType (type) - What type the data should be in
- If None: Will read as str, unless the variable is logged in self.knownTypes under 'section' or DEFAULT
default_values (dict) - Local default values; overrides the global default values temporarily
include_defaults (bool) - Determines if the default section should be used as a fallback
raw (bool) - Determines if the value should be returned without applying interpolation
___________________ BASIC INTERPOLATION ___________________
Variables are denoted with a single '%', followed by closed paren
Example: scanDelay = %(delay) %(units)
To use an escaped %: %%
Example: units = %%
___________________ EXTENDED INTERPOLATION ___________________
Variables are denoted with a '$', followed by braces
Example: scanDelay = ${delay} ${units}
Variables from other sections can be used with a ':'
Example: scanDelay = ${delay} ${general:units}
Example Input: get()
Example Input: get("startup_user")
Example Input: get("scanDelay", section = "AutoSave")
Example Input: get("scanDelay", section = "AutoSave", dataType = int)
Example Input: get("startup_window", defaults = {"startup_window": "inventory"})
Example Input: get(("user", "password", "<PASSWORD>"), section = "Database_Admin")
Example Input: get({"Database_Admin": ("user", "password", "port")})
Example Input: get(include_defaults = False)
"""
section = section or self.default_section
self.check_invalidSection(section, valid_section = valid_section)
if (not self.has_section(section)):
section = self.config.default_section
if (variable is None):
if (include_defaults):
variableList = tuple(self[section].keys())
else:
variableList = tuple(self.config._sections[section].keys())
return self.get(variableList, section = section, dataType = dataType, default_values = default_values, fallback = fallback,
raw = raw, forceSetting = forceSetting, forceSection = forceSection, include_defaults = include_defaults, valid_section = valid_section)
if (isinstance(variable, dict)):
answer = {_section: self.get(_variable, section = _section, dataType = dataType, default_values = default_values, fallback = fallback,
raw = raw, forceSetting = forceSetting, forceSection = forceSection, include_defaults = include_defaults, valid_section = valid_section) for _section, _variable in variable.items()}
if (forceSection or len(answer) > 1):
return answer
elif (not answer):
return
return next(iter(answer.values()))
if (not isinstance(variable, (str, int, float))):
answer = {_variable: self.get(_variable, section = section, dataType = dataType, default_values = default_values, fallback = fallback,
raw = raw, forceSetting = forceSetting, forceSection = forceSection, include_defaults = include_defaults, valid_section = valid_section) for _variable in variable}
if (forceSetting or len(answer) > 1):
return answer
elif (not answer):
return
return next(iter(answer.values()))
function = self._getType(variable, section, dataType = dataType)
try:
return function(section, variable, vars = default_values or {}, raw = raw, fallback = fallback)
except (configparser.InterpolationDepthError, configparser.InterpolationMissingOptionError) as error:
print("@Configuration.get", error)
return function(section, variable, vars = default_values or {}, raw = True, fallback = fallback)
except Exception as error:
print("ERROR", [function, section, variable, default_values or {}, raw, fallback])
raise error
def set(self, variable, value = None, section = None, *, valid_section = NULL, save = False):
"""Adds a setting to the given section.
variable (str) - What setting to get
- If list: Will set each variable in the list to 'value'
- If dict: Will ignore 'value' and set each key to its given value
section (str) - What section to write this setting in
- If None: Will use the default section
Example Input: set("startup_user", "admin")
Example Input: set("scanDelay", 1000, section = "AutoSave")
Example Input: set({"startup_user": "admin"})
Example Input: set({"AutoSave": {"scanDelay": 1000}})
"""
if (self.readOnly):
raise ReadOnlyError(self)
self.check_invalidSection(section, valid_section = valid_section)
if (isinstance(variable, dict)):
for _variable, _value in variable.items():
if (isinstance(_value, dict)):
for __variable, __value in _value.items():
self.set(__variable, value = __value, section = _variable, valid_section = valid_section, save = save)
else:
self.set(_variable, value = _value, section = section, valid_section = valid_section, save = save)
return
if (not isinstance(variable, (str, int, float))):
for _variable in variable:
self.set(_variable, value = value, section = section, valid_section = valid_section, save = save)
return
section = section or self.default_section
if (not self.config.has_section(section)):
self.config.add_section(section)
if (value is None):
self.config.set(section, variable, "")
else:
self.config.set(section, variable, f"{value}")
if (save):
self.save()
def replaceWithDefault(self, filePath = None, *, forceExists = False, allowBackup = True, mustRead = False):
"""Replaces the file with the backup file, or throws an error
Example Input: replaceWithDefault()
Example Input: replaceWithDefault("database/settings_user.ini")
"""
global openPlus
filePath = filePath or self.default_filePath
if (allowBackup and (self.backup_filePath is not None)):
if (not os.path.exists(self.backup_filePath)):
raise FileExistsError(self.backup_filePath)
self.config.read(self.backup_filePath)
elif (mustRead):
raise ValueError("Could not read from a backup file")
if (forceExists and isinstance(forceExists, dict)):
self.set(forceExists, valid_section = None)
with openPlus(filePath) as config_file:
self.config.write(config_file)
def load(self, filePath = None, *, version = NULL, valid_section = NULL, forceExists = False, forceCondition = None, allowBackup = True):
"""Loads the configuration file.
filePath (str) - Where to load the config file from
- If None: Will use the default file path
valid_section (list) - Updates self.valid_section if not NULL
Example Input: load()
Example Input: load("database/settings_user.ini")
Example Input: load("database/settings_user.ini", valid_section = ("testing",))
"""
if (valid_section is not NULL):
self.set_validSection(valid_section)
if (version is NULL):
version = self.version
filePath = filePath or self.default_filePath
if (not os.path.exists(filePath)):
if ((not allowBackup) or (self.backup_filePath is None)):
raise FileExistsError(filePath)
self.replaceWithDefault(filePath, forceExists = forceExists, allowBackup = allowBackup)
self.config.read(filePath)
if (version is not None):
_version = self.config["DEFAULT"].get("_version_", None)
if (_version != version):
self.replaceWithDefault(filePath, forceExists = forceExists, allowBackup = allowBackup, mustRead = True)
self.config.read(filePath)
__version = self.config["DEFAULT"].get("_version_", None)
if (__version != version):
raise KeyError(f"Reset config, but version still does not match; old: {__version}; new: {_version}; match: {version}")
if (forceCondition is not None):
for variable, value in forceCondition.items():
var_mustBe = self.tryInterpolation(variable, value)
var_isActually = self.get(variable)
if (var_mustBe != var_isActually):
print(f"Forced conditions not met: '{var_mustBe}' is not '{var_isActually}'. Replacing config file with 'forceMatch'")
os.remove(filePath)
self.reset()
return self.load(filePath = filePath, valid_section = valid_section, forceExists = forceExists, forceCondition = None)
def tryInterpolation(self, variable, value, section = None):
return self.config._interpolation.before_get(self.config, section or "DEFAULT", variable, value, self.config.defaults())
def save(self, filePath = None, override_readOnly = False, **kwargs):
"""Saves changes to config file.
filePath (str) - Where to save the config file to
- If None: Will use the default file path
Example Input: save()
Example Input: save("database/settings_user.ini")
"""
global openPlus
if ((not override_readOnly) and self.readOnly):
raise ReadOnlyError(self)
filePath = filePath or self.default_filePath
with openPlus(filePath or self.default_filePath, **kwargs) as config_file:
self.config.write(config_file)
def has_section(self, section = None, *, valid_section = NULL):
"""Returns True if the section exists in the config file, otherwise returns False.
section (str) - What section to write this setting in
- If None: Will use the default section
Example Input: has_section()
Example Input: has_section(section = "AutoSave")
"""
section = section or self.default_section
if (section == self.config.default_section):
return True
return section in self.getSections(valid_section = valid_section, skip_knownTypes = False)
def has_setting(self, variable, section = None, *, checkDefault = False, valid_section = NULL):
"""Returns True if the setting exists in given section of the config file, otherwise returns False.
section (str) - What section to write this setting in
- If None: Will use the default section
checkDefault (bool) - Determines if the section DEFAULT is taken into account
Example Input: has_setting("startup_user")
Example Input: has_setting("scanDelay", section = "AutoSave")
Example Input: has_setting("startup_user", checkDefault = True)
"""
section = section or self.default_section
self.check_invalidSection(section, valid_section = valid_section)
if (checkDefault):
return self.config.has_option(section, variable)
else:
return variable in self.config._sections.get(section, ())
def remove_section(self, section = None, *, valid_section = NULL):
"""Removes a section.
section (str) - What section to write this setting in
- If None: Will remove all sections
Example Input: remove_section("startup_user")
Example Input: remove_section("scanDelay", section = "AutoSave")
"""
if (self.readOnly):
raise ReadOnlyError(self)
if (section is None):
for section in self.getSections():
self.config.remove_section(section)
return
self.check_invalidSection(section, valid_section = valid_section)
self.config.remove_section(section or self.default_section)
def remove_setting(self, variable, section = None, *, valid_section = NULL):
"""Removes a setting from the given section.
section (str) - What section to write this setting in
- If None: Will use the default section
Example Input: remove_setting("startup_user")
Example Input: remove_setting("scanDelay", section = "AutoSave")
"""
if (self.readOnly):
raise ReadOnlyError(self)
self.check_invalidSection(section, valid_section = valid_section)
self.config.remove_option(section or self.default_section, variable)
def getSections(self, *, valid_section = NULL, skip_knownTypes = True):
"""Returns a list of existing sections.
Example Input: getSections()
"""
def yieldSection():
nonlocal self, valid_section, skip_knownTypes
for section in self.config.sections():
if (self.check_invalidSection(section, raiseError = False, valid_section = valid_section)):
continue
if (skip_knownTypes and (section == self.knownTypesSection)):
continue
yield section
###################################
return tuple(yieldSection())
def getDefaults(self):
"""Returns the defaults that will be used if a setting does not exist.
section (str) - What section to write this setting in
- If None: Will use the default section
Example Input: getDefaults()
"""
return self.config.defaults()
def extraBool(self, value, state):
"""Adds a value as an extra possible bool.
Default cases (case-insensitive): yes/no, on/off, true/false, 1/0
Example Input: extraBool("sure", True)
Example Input: extraBool("nope", False)
"""
configparser.ConfigParser.BOOLEAN_STATES.update({value: state})
def set_validSection(self, valid_section = None):
if (valid_section is None):
self.valid_section = None
else:
self.valid_section = (self.config.default_section, *((self.knownTypesSection,) if (self.knownTypesSection is not None) else ()), *self.ensure_container(valid_section))
#Converters
@staticmethod
def convert_datetime(value):
return datetime.datetime.strptime(value, "%Y/%m/%d %H:%M:%S.%f")
converters = {
"datetime": convert_datetime,
}
#Interpolators
class MyExtendedInterpolation(configparser.ExtendedInterpolation):
"""Modified ExtendedInterpolation from configparser.py"""
def _interpolate_some(self, parser, option, accum, rest, section, mapping, depth):
"""The default ExtendedInterpolation does not account for default values in nested interpolations.
ie: The following does not work when get() is given the kwargs 'section = "debugging"' and 'vars = {"filePath_versionDir": "C:/"}').
[admin]
alembicPath = ${filePath_versionDir}/Schema/main/
[debugging]
alembicPath = ${admin:alembicPath}
"""
rawval = parser.get(section, option, raw = True, fallback = rest)
if (depth > configparser.MAX_INTERPOLATION_DEPTH):
raise configparser.InterpolationDepthError(option, section, rawval)
while rest:
p = rest.find("$")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "$":
accum.append("$")
rest = rest[2:]
elif c == "{":
m = self._KEYCRE.match(rest)
if m is None:
raise configparser.InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
path = m.group(1).split(':')
rest = rest[m.end():]
sect = section
opt = option
try:
if (len(path) == 1):
opt = parser.optionxform(path[0])
v = mapping[opt]
elif (len(path) == 2):
sect = path[0]
opt = parser.optionxform(path[1])
v = parser.get(sect, opt, raw = True)
else:
raise configparser.InterpolationSyntaxError(option, section, "More than one ':' found: %r" % (rest,))
except (KeyError, configparser.NoSectionError, configparser.NoOptionError):
raise configparser.InterpolationMissingOptionError(option, section, rawval, ":".join(path)) from None
if ("$" in v):
self._interpolate_some(parser, opt, accum, v, sect, {**mapping, **dict(parser.items(sect, raw = True))}, depth + 1) # <- This was the only change
else:
accum.append(v)
else:
raise configparser.InterpolationSyntaxError(
option, section,
"'$' must be followed by '$' or '{', "
"found: %r" % (rest,))
def build(*args, **kwargs):
"""Creates a Configuration object."""
return Configuration(*args, **kwargs)
def quiet(*args):
pass
print(*args)
def sandbox():
# config_API = build_configuration()
# # config_API.set("startup_user", "admin")
# # config_API.save("test/test.ini")
# config_API.load("test/test.ini")
# # quiet(config_API.get("startup_user"))
# with config_API as config:
# for section, sectionHandle in config.items():
# for key, value in sectionHandle.items():
# quiet(section, key, value)
user = os.environ.get('username')
config_API = build("M:/Versions/dev/Settings/settings_user.ini", valid_section = user, default_section = user, knownTypes = {"x": bool, "y": bool})
value = config_API.get("startup_user")
print(value, type(value))
# value = config_API.get("x")
# print(value, type(value))
# value = config_API.get("y")
# print(value, type(value))
def main():
"""The main program controller."""
sandbox()
if __name__ == '__main__':
main()
```
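A short usage sketch for the Configuration wrapper above, assuming the module and its MyUtilities dependency are importable and that a settings.ini along the lines of the sample shown in the class docstring already exists; the file name, section, and key names are illustrative.
```python
from db_config import build  # module name taken from the file header above

# Load an existing ini file; missing settings fall back to the DEFAULT section
config = build("settings.ini", default_section = "main")
print(config.get("startup_user"))                          # read from [main]
print(config.get("delay", section = "AutoSave", dataType = int))
config.set("startup_user", "guest", save = True)           # write the change back to disk
```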
#### File: JoshMayberry/SQLite-Database/db_json.py
```python
import os
import abc
import yaml
import contextlib
import collections
import MyUtilities.common
from API_Database.utilities import json
NULL = MyUtilities.common.NULL
openPlus = MyUtilities.common.openPlus
class Config_Base(MyUtilities.common.EnsureFunctions, MyUtilities.common.CommonFunctions, metaclass = abc.ABCMeta):
"""Utility API for json and yaml scripts."""
def __init__(self, default_filePath = None, *, defaultFileExtension = None, override = None, overrideIsSave = None, forceExists = False):
"""
Example Input: JSON_Aid()
Example Input: YAML_Aid()
"""
self.defaultFileExtension = defaultFileExtension
self.default_filePath = default_filePath or f"settings.{self.defaultFileExtension}"
self.filePath_lastLoad = self.default_filePath
self.dirty = None
self.contents = {}
self.contents_override = {}
self.setOverride(override = override, overrideIsSave = overrideIsSave)
if (default_filePath):
self.load(default_filePath, forceExists = forceExists)
def __repr__(self):
representation = f"{type(self).__name__}(id = {id(self)})"
return representation
def __str__(self):
output = f"{type(self).__name__}()\n-- id: {id(self)}\n"
return output
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if (traceback is not None):
print(exc_type, exc_value)
return False
def __getitem__(self, key):
return self.contents[key]
def __setitem__(self, key, value):
self.contents[key] = value
def __delitem__(self, key):
del self.contents[key]
def __contains__(self, key):
return key in self.contents
def read(self, default = None):
for section, catalogue in self.contents.items():
for setting, value in catalogue.items():
if (isinstance(value, dict)):
yield section, setting, value.get("value", default)
else:
yield section, setting, value
def setOverride(self, override = None, overrideIsSave = None):
"""Applies override settings for advanced save and load managment.
override (str) - A .json file that will override sections in the given filePath
- If None: Will ignore all override conditions
- If dict: Will use the given dictionary instead of a .json file for saving and loading; can be empty
- If str: Will use the .json file located there (it will be created if necessary)
overrideIsSave (bool) - Determines what happens when no file path is given to save()
- If True: Saves any changes to 'override', unless that value exists in 'default_filePath' in which case it will be removed from 'override'
- If False: Saves any changes to 'override'
- If None: Saves any changes to 'default_filePath'
~ Removed sections or settings are ignored
Example Input: setOverride()
Example Input: setOverride(override = "")
Example Input: setOverride(override = "settings_user_override.json")
Example Input: setOverride(override = {"Lorem": {"ipsum": 1}})
Example Input: setOverride(override = {}, overrideIsSave = True)
Example Input: setOverride(override = {}, overrideIsSave = False)
"""
if (override is None):
self.override = None
self.overrideIsSave = None
self.contents_override = {}
self.default_filePath_override = None
return
self.overrideIsSave = overrideIsSave
if (isinstance(override, dict)):
self.override = False
self.contents_override = override
self.default_filePath_override = None
return
self.override = True
self.default_filePath_override = override or f"settings_override.{self.defaultFileExtension}"
self.contents_override = {}
@contextlib.contextmanager
def _load(self, filePath = None, removeDirty = True, applyOverride = True, forceExists = False):
"""Loads the json file.
filePath (str) - Where to load the config file from
- If None: Will use the default file path
Example Input: load()
Example Input: load("database/settings_user.json")
"""
if (isinstance(filePath, dict)):
self.contents = {**filePath}
self.filePath_lastLoad = None
yield None
else:
filePath = filePath or self.default_filePath
self.filePath_lastLoad = filePath
if (not os.path.exists(filePath)):
if (not forceExists):
raise FileExistsError(filePath)
if (isinstance(forceExists, dict)):
self.set(forceExists, valid_section = None)
self.save(filePath = filePath, applyOverride = False, removeDirty = False)
with open(filePath) as fileHandle:
yield fileHandle
if (removeDirty):
self.dirty = False
if (applyOverride):
self.load_override()
@contextlib.contextmanager
def _load_override(self, filePath = None):
"""Loads the override json file.
filePath (str) - Where to load the config file from
- If None: Will use the default file path
Example Input: load_override()
Example Input: load_override("database/settings_user_override.json")
"""
if (self.override is None):
yield None
return
filePath = filePath or self.default_filePath_override
if (isinstance(filePath, dict)):
self.contents_override = {**filePath}
yield None
else:
if (self.override and (os.path.exists(filePath))):
with open(filePath) as fileHandle:
yield fileHandle
else:
yield None
MyUtilities.common.nestedUpdate(self.contents, self.contents_override, preserveNone = False)
@contextlib.contextmanager
def _save(self, filePath = None, ifDirty = True, removeDirty = True,
applyOverride = True, overrideKwargs = None, **kwargs):
"""Saves changes to json file.
filePath (str) - Where to save the config file to
- If None: Will use the default file path
ifDirty (bool) - Determines if the file should be saved only if changes have been made
Example Input: save()
Example Input: save("database/settings_user.json")
"""
global openPlus
filePath = filePath or self.default_filePath
if (ifDirty and (not self.dirty) and (os.path.exists(filePath))):
yield None
return
try:
if (applyOverride and self.save_override(**(overrideKwargs or {}))):
yield None
return
with openPlus(filePath, **kwargs) as fileHandle:
yield fileHandle
except Exception as error:
raise error
finally:
if (removeDirty):
self.dirty = False
@contextlib.contextmanager
def _save_override(self, filePath = None, *, base = None):
"""Saves changes to json file.
Note: Only looks at changes and additions, not removals.
filePath (str) - Where to save the config file to
- If None: Will use the default file path
Example Input: save_override()
Example Input: save_override("database/settings_user_override.json")
"""
global openPlus
def formatCatalogue(catalogue):
if (not isinstance(catalogue, dict)):
return {"value": catalogue}
return catalogue
#######################################
if (self.overrideIsSave is None):
yield None
return
base = base or {}
changes = collections.defaultdict(lambda: collections.defaultdict(dict))
for section, new in self.contents.items():
old = base.get(section, {})
for setting, new_catalogue in new.items():
new_catalogue = formatCatalogue(new_catalogue)
old_catalogue = formatCatalogue(old.get(setting, {}))
for option, new_value in new_catalogue.items():
old_value = old_catalogue.get(option, NULL)
if ((new_value or (option != "comment")) and ((old_value is NULL) or (new_value != old_value))):
changes[section][setting][option] = new_value
self.contents_override.clear()
MyUtilities.common.nestedUpdate(self.contents_override, changes) #Filter out defaultdict
if (self.override):
with openPlus(filePath or self.default_filePath_override) as fileHandle:
yield fileHandle
else:
yield None
def _ensure(self, section, variable = None, value = None, *, comment = None,
forceAttribute = False, makeDirty = True):
"""Makes sure that the given variable exists in the given section.
section (str) - Which section to ensure 'variable' for
- If list: Will ensure all given sections have 'variable'
- If dict: Will ignore 'variable' and 'value'
variable (str) - Which variable to ensure
- If list: Will ensure all given variables
- If dict: Will ignore 'value' and use the key as 'variable' and the value as 'value'
value (any) - The default value that 'variable' should have if it does not exist
comment (str) - Optional comment string for 'variable'
Example Input: ensure("containers", "label", value = False)
Example Input: ensure("containers", {"label": False})
Example Input: ensure({"containers": {"label": False}})
"""
for sectionCatalogue in self.ensure_container(section):
for _section, variableCatalogue in self.ensure_dict(sectionCatalogue, variable).items():
if (not self.has_section(_section)):
self.contents[_section] = {}
for _variableCatalogue in self.ensure_container(variableCatalogue):
for _variable, _value in self.ensure_dict(_variableCatalogue, value).items():
if (not self.has_setting(_variable, _section)):
if (makeDirty):
self.dirty = True
if (comment):
yield _section, _variable, {"value": _value, "comment": comment}
elif (forceAttribute):
yield _section, _variable, {"value": _value}
else:
yield _section, _variable, _value
@abc.abstractmethod
def load(self, *args, **kwargs):
pass
@abc.abstractmethod
def load_override(self, *args, **kwargs):
pass
@abc.abstractmethod
def save(self, *args, **kwargs):
pass
@abc.abstractmethod
def save_override(self, *args, **kwargs):
pass
@abc.abstractmethod
def ensure(self, *args, **kwargs):
pass
def get(self, section, setting = None, default = None, *,
forceAttribute = None, forceTuple = False,
filterNone = False, useForNone = None):
"""Returns the value of the given setting in the given section.
setting (str) - What variable to look for
- If list: Will return the value for each variable given
Example Input: get("lorem", "ipsum")
Example Input: get("lorem", ("ipsum", "dolor"))
"""
def formatValue(value):
nonlocal default
if (isinstance(value, dict)):
return value.get("value", default)
return value
def yieldValue():
nonlocal self, section, setting, filterNone, useForNone
for _setting in self.ensure_container(setting):
value = formatValue(self.contents[section][_setting])
if (filterNone and (value is useForNone)):
continue
yield _setting, value
####################
setting = self.ensure_default(setting, lambda: self.getSettings(section))
answer = {key: value for key, value in yieldValue()}
if ((forceAttribute is not None) and (forceAttribute or (len(answer) != 1))):
return answer
else:
return self.oneOrMany(answer.values(), forceTuple = forceTuple)
def set(self, contents = None, update = True, makeDirty = True):
"""Adds a section to the internal contents.
contents (dict) - What to add
update (bool) - Determines what happens if a key already exists for the given 'contents'
- If True: Will update nested dictionaries
- If False: Will replace nested dictionaries
- If None: Will replace entire self.contents
Example Input: set()
Example Input: set({"lorem": 1})
Example Input: set({"ipsum": {"dolor": 4}})
Example Input: set({"ipsum": {"dolor": 5}}, update = False)
Example Input: set({"lorem": 1, "ipsum": {"dolor": 2, "sit": 3}}, update = None)
"""
contents = contents or {}
assert isinstance(contents, dict)
if (update is None):
self.contents = contents
elif (not update):
self.contents.update(contents)
else:
MyUtilities.common.nestedUpdate(self.contents, contents)
if (makeDirty):
self.dirty = True
def apply(self, handle, section, include = None, exclude = None, handleTypes = None):
"""Places default values into the supplied handle.
___________________ REQUIRED FORMAT ___________________
self.contents = {
section (str): {
variable (str): value (any),
variable (str): {
"value": value (any),
"comment": docstring (str), #optional
},
},
}
_______________________________________________________
section (str) - Which section to apply variables for
- If list: Will apply all given sections
handle (object) - What to apply the given sections to
- If list: will apply to all
include (str) - What variable is allowed to be applied
- If list: Will apply all variables in the section
handleTypes (list) - Place the type for 'handle' here
Example Input: apply(test_1, "GUI_Manager", handleTypes = Test)
Example Input: apply(test_2, ("DatabaseInfo", "Users"), handleTypes = (Test,))
Example Input: apply(self, {"FrameSettings": self.label}, handleTypes = (self.__class__,))
Example Input: apply(self, {"FrameSettings": {self.label: "title"}}, handleTypes = (self.__class__,))
Example Input: apply((test1, test_2), {"Settings": ("debugging_default", "debugging_enabled")}, handleTypes = Test)
"""
def yieldApplied():
nonlocal self, handle, section, handleTypes
for _handle in self.ensure_container(handle, elementTypes = handleTypes):
for _section in self.ensure_container(section):
for item in setValue(_handle, _section, self.contents):
yield item
def setValue(_handle, _section, catalogue):
nonlocal self, include, exclude
if (isinstance(_section, dict)):
for key, value in _section.items():
for item in setValue(_handle, value, catalogue.get(key)):
yield item
return
if (_section not in catalogue):
print("@apply", f"{_section} does not exist in catalogue\n -- keys: {tuple(catalogue.keys())}")
raise NotImplementedError()
return
for variable, _catalogue in catalogue[_section].items():
if (include and (variable not in include)):
continue
if (exclude and (variable in exclude)):
continue
if ((not isinstance(_catalogue, dict) or ("value" not in _catalogue))):
setattr(_handle, variable, _catalogue)
else:
setattr(_handle, variable, _catalogue["value"])
yield variable
#######################################################
include = self.ensure_container(include)
exclude = self.ensure_container(exclude)
return tuple(yieldApplied())
def has_section(self, section = None):
"""Returns True if the section exists in the config file, otherwise returns False.
section (str) - What section to write this setting in
- If None: Will use the default section
Example Input: has_section()
Example Input: has_section(section = "AutoSave")
"""
return (section or self.default_section) in self.contents
def has_setting(self, variable, section = None):
"""Returns True if the setting exists in given section of the config file, otherwise returns False.
section (str) - What section to write this setting in
- If None: Will use the default section
Example Input: has_setting("startup_user")
Example Input: has_setting("scanDelay", section = "AutoSave")
"""
return variable in self.contents.get(section or self.default_section, {})
def is_dirty(self):
"""Returns True if changes have been made that are not yet saved, otherwise returns False.
Example Input: is_dirty()
"""
return self.dirty
def getSections(self, variable = None):
"""Returns a list of existing sections.
variable (str) - What variable must exist in the section
- If None: Will not search for sections by variable
Example Input: getSections()
Example Input: getSections(variable = "debugging_default")
"""
if (variable is None):
return tuple(self.contents.keys())
return tuple(key for key, catalogue in self.contents.items() if (variable in catalogue.keys()))
def getSettings(self, section = None, valuesAsSet = False):
"""Returns a list of existing settings for the given section.
section (str) - What section to write this setting in
- If list: Will use all in list
- If None: Will use all existing sections
Example Input: getSettings()
Example Input: getSettings("AutoSave")
"""
if (valuesAsSet):
container = set
else:
container = tuple
return container(variable for key in (self.ensure_container(section) or self.contents.keys()) for variable in self.contents[key].keys())
class JSON_Aid(Config_Base):
"""Utility API for json scripts.
Use: https://martin-thoma.com/configuration-files-in-python/#json
"""
def __init__(self, default_filePath = None, **kwargs):
"""
Example Input: JSON_Aid()
"""
super().__init__(default_filePath = default_filePath or "settings.json", defaultFileExtension = "json", **kwargs)
def load(self, *args, **kwargs):
"""Loads the json file.
filePath (str) - Where to load the config file from
- If None: Will use the default file path
Example Input: load()
Example Input: load("database/settings_user.json")
"""
with self._load(*args, **kwargs) as fileHandle:
if (fileHandle is not None):
self.contents = json.load(fileHandle) or {}
return self.contents
def load_override(self, *args, **kwargs):
"""Loads the override json file.
filePath (str) - Where to load the config file from
- If None: Will use the default file path
Example Input: load_override()
Example Input: load_override("database/settings_user_override.json")
"""
with self._load_override(*args, **kwargs) as fileHandle:
if (fileHandle is not None):
self.contents_override = json.load(fileHandle) or {}
def save(self, *args, **kwargs):
"""Saves changes to json file.
filePath (str) - Where to save the config file to
- If None: Will use the default file path
ifDirty (bool) - Determines if the file should be saved only if changes have been made
Example Input: save()
Example Input: save("database/settings_user.json")
"""
with self._save(*args, **kwargs) as fileHandle:
if (fileHandle is not None):
json.dump(self.contents or None, fileHandle, indent = "\t")
def save_override(self, *args, base = None, **kwargs):
"""Saves changes to json file.
Note: Only looks at changes and additions, not removals.
filePath (str) - Where to save the config file to
- If None: Will use the default file path
Example Input: save_override()
Example Input: save_override("database/settings_user_override.json")
"""
if (base is None):
with open(self.filePath_lastLoad) as fileHandle:
base = json.load(fileHandle) or {}
with self._save_override(*args, base = base, **kwargs) as fileHandle:
if (fileHandle is not None):
json.dump(self.contents_override or None, fileHandle, indent = "\t")
return True
def ensure(self, *args, saveToOverride = None, **kwargs):
"""Makes sure that the given variable exists in the given section.
Example Input: ensure("containers", "label", value = False)
Example Input: ensure("containers", "label", value = False, saveToOverride = True)
Example Input: ensure("containers", "label", value = False, saveToOverride = False)
"""
global openPlus
if (saveToOverride is None):
for section, variable, value in self._ensure(*args, **kwargs):
self.contents[section][variable] = value
return True
filePath = (self.filePath_lastLoad, self.default_filePath_override)[saveToOverride]
with open(filePath) as fileHandle:
base = json.load(fileHandle) or {}
changed = False
for section, variable, value in self._ensure(*args, **kwargs):
self.contents[section][variable] = value
changed = True
if (section not in base):
base[section] = {}
base[section][variable] = value
if (changed):
with openPlus(filePath) as fileHandle:
json.dump(base or None, fileHandle, indent = "\t")
return True
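# Minimal usage sketch for the override workflow (the file path is hypothetical; build_json
# is defined at the bottom of this module):
#
#     settings = build_json(default_filePath = "database/settings_user.json")
#     settings.load()
#     settings.ensure("GUI_Manager", "startup_window", value = "main")
#     settings.save(ifDirty = True)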
class YAML_Aid(Config_Base):
"""Utility API for yaml scripts.
Use: https://pyyaml.org/wiki/PyYAMLDocumentation
Use: https://martin-thoma.com/configuration-files-in-python/#yaml
"""
def __init__(self, default_filePath = None, **kwargs):
"""
Example Input: YAML_Aid()
"""
super().__init__(default_filePath = default_filePath or "settings.yaml", defaultFileExtension = "yaml", **kwargs)
def load(self, *args, **kwargs):
"""Loads the yaml file.
filePath (str) - Where to load the config file from
- If None: Will use the default file path
Example Input: load()
Example Input: load("database/settings_user.yaml")
"""
with self._load(*args, **kwargs) as fileHandle:
if (fileHandle is not None):
self.contents = yaml.safe_load(fileHandle) or {}
return self.contents
def load_override(self, *args, **kwargs):
"""Loads the override yaml file.
filePath (str) - Where to load the config file from
- If None: Will use the default file path
Example Input: load_override()
Example Input: load_override("database/settings_user_override.yaml")
"""
with self._load_override(*args, **kwargs) as fileHandle:
if (fileHandle is not None):
self.contents_override = yaml.safe_load(fileHandle) or {}
def save(self, *args, explicit_start = True, explicit_end = True, width = None, indent = 4,
default_style = None, default_flow_style = None, canonical = None, line_break = None,
encoding = None, allow_unicode = None, version = None, tags = None, **kwargs):
"""Saves changes to yaml file.
filePath (str) - Where to save the config file to
- If None: Will use the default file path
ifDirty (bool) - Determines if the file should be saved only if changes have been made
Example Input: save()
Example Input: save("database/settings_user.yaml")
"""
with self._save(*args, overrideKwargs = {"explicit_start": explicit_start, "width": width, "indent": indent,
"canonical": canonical, "default_flow_style": default_flow_style}, **kwargs) as fileHandle:
if (fileHandle is not None):
yaml.dump(self.contents or None, fileHandle, explicit_start = explicit_start, explicit_end = explicit_end, width = width,
default_style = default_style, default_flow_style = default_flow_style, canonical = canonical, indent = indent,
encoding = encoding, allow_unicode = allow_unicode, version = version, tags = tags, line_break = line_break)
def save_override(self, *args, base = None, explicit_start = True, explicit_end = True, width = None, indent = 4,
default_style = None, default_flow_style = None, canonical = None, line_break = None,
encoding = None, allow_unicode = None, version = None, tags = None, **kwargs):
"""Saves changes to yaml file.
Note: Only looks at changes and additions, not removals.
filePath (str) - Where to save the config file to
- If None: Will use the default file path
Example Input: save_override()
Example Input: save_override("database/settings_user_override.yaml")
"""
if (base is None):
with open(self.filePath_lastLoad) as fileHandle:
base = yaml.safe_load(fileHandle) or {}
with self._save_override(*args, base = base, **kwargs) as fileHandle:
if (fileHandle is not None):
yaml.dump(self.contents_override or None, fileHandle, explicit_start = explicit_start, explicit_end = explicit_end, width = width,
default_style = default_style, default_flow_style = default_flow_style, canonical = canonical, indent = indent,
encoding = encoding, allow_unicode = allow_unicode, version = version, tags = tags, line_break = line_break)
return True
def ensure(self, *args, saveToOverride = None, explicit_start = True, explicit_end = True, width = None, indent = 4,
default_style = None, default_flow_style = None, canonical = None, line_break = None,
encoding = None, allow_unicode = None, version = None, tags = None, **kwargs):
"""Makes sure that the given variable exists in the given section.
Example Input: ensure("containers", "label", value = False)
Example Input: ensure("containers", "label", value = False, saveToOverride = True)
Example Input: ensure("containers", "label", value = False, saveToOverride = False)
"""
global openPlus
if (saveToOverride is None):
for section, variable, value in self._ensure(*args, **kwargs):
self.contents[section][variable] = value
return True
filePath = (self.filePath_lastLoad, self.default_filePath_override)[saveToOverride]
with open(filePath) as fileHandle:
base = yaml.safe_load(fileHandle) or {}
changed = False
for section, variable, value in self._ensure(*args, **kwargs):
self.contents[section][variable] = value
changed = True
if (section not in base):
base[section] = {}
base[section][variable] = value
if (changed):
with openPlus(filePath) as fileHandle:
yaml.dump(base or None, fileHandle, explicit_start = explicit_start, explicit_end = explicit_end, width = width,
default_style = default_style, default_flow_style = default_flow_style, canonical = canonical, indent = indent,
encoding = encoding, allow_unicode = allow_unicode, version = version, tags = tags, line_break = line_break)
return True
def quiet(*args):
pass
print(*args)
def sandbox():
def test_yaml():
class Test(): pass
test_1 = Test()
test_2 = Test()
# yaml_api = build_yaml(default_filePath = "test/settings.yaml", forceExists = True)
# print(yaml_api)
# yaml_api = build_yaml(default_filePath = "M:/Versions/dev/Settings/default_user.yaml", override = {"GUI_Manager": {"startup_window": "settings"}})
yaml_api = build_yaml(default_filePath = "M:/Versions/dev/Settings/default_user.yaml", override = "M:/Versions/dev/Settings/temp_default_user.yaml", overrideIsSave = True)
quiet(yaml_api.apply(test_1, "GUI_Manager", handleTypes = Test))
quiet(yaml_api.apply(test_2, ("Barcodes", "Users"), handleTypes = (Test,)))
quiet(yaml_api.apply((test_1, test_2), "Settings", ("debugging_default", "debugging_enabled"), handleTypes = (Test,)))
quiet(vars(test_1))
quiet(vars(test_2))
quiet(yaml_api.getSettings())
quiet(yaml_api.getSettings("Users"))
quiet(yaml_api.getSections(variable = "debugging_default"))
yaml_api.set({"GUI_Manager": {"startup_window": "main"}})
yaml_api.save()
def test_json():
class Test(): pass
test_1 = Test()
test_2 = Test()
# json_API = build_json(default_filePath = "M:/Versions/dev/Settings/default_user.json", override = {"GUI_Manager": {"startup_window": "settings"}})
json_API = build_json(default_filePath = "M:/Versions/dev/Settings/default_user.json", override = "M:/Versions/dev/Settings/temp_default_user.json", overrideIsSave = True)
json_API.apply(test_1, "GUI_Manager", handleTypes = Test)
json_API.apply(test_2, ("Barcodes", "Users"), handleTypes = (Test,))
json_API.apply((test_1, test_2), "Settings", ("debugging_default", "debugging_enabled"), handleTypes = (Test,))
quiet(vars(test_1))
quiet(vars(test_2))
quiet(json_API.getSettings("Users"))
quiet(json_API.getSections(variable = "debugging_default"))
json_API.set({"GUI_Manager": {"startup_window": "main"}})
json_API.save()
###############################
test_json()
# test_yaml()
def build(*args, **kwargs):
"""Creates a YAML_Aid object."""
return build_yaml(*args, **kwargs)
def build_json(*args, **kwargs):
"""Creates a JSON_Aid object."""
return JSON_Aid(*args, **kwargs)
def build_yaml(*args, **kwargs):
"""Creates a YAML_Aid object."""
return YAML_Aid(*args, **kwargs)
def main():
"""The main program controller."""
sandbox()
if __name__ == '__main__':
main()
```
#### File: JoshMayberry/SQLite-Database/settingsLoader.py
```python
import sys
import contextlib
from API_Database import db_config
import MyUtilities.common
import MyUtilities.logger
import MyUtilities.wxPython
NULL = MyUtilities.common.NULL
#Decorators
wrap_skipEvent = MyUtilities.wxPython.wrap_skipEvent
def build(*args, **kwargs):
return LoadingController(*args, **kwargs)
class LoadingController(MyUtilities.common.EnsureFunctions, MyUtilities.common.CommonFunctions, MyUtilities.logger.LoggingFunctions):
logger_config = {
None: {
"level": 1,
},
"console": {
"type": "stream",
"level": 1,
},
}
def __init__(self, module = None, filePath = "settings.ini", section = "parameters", *, logger_name = None, logger_config = None, **configKwargs):
MyUtilities.common.EnsureFunctions.__init__(self)
MyUtilities.common.CommonFunctions.__init__(self)
MyUtilities.logger.LoggingFunctions.__init__(self, label = logger_name or __name__, config = logger_config or self.logger_config, force_quietRoot = __name__ == "__main__")
self.building = True
self.databound_widgets = {} # {
# settings variable (str): [
# {
# "widget": GUI_Maker widget,
# "variable": The settings variable this is used for (duplicate of parent key)
# "displayOnly": If setting values can be modified (bool),
# "getter": What function to run to get the setting from the widget (function),
# "setter": What function to run to set the setting in the widget (function),
# "toggle": [
# {
# "widget": A widget to toggle,
# "enable": If the enable state should be toggled (bool),
# "show": If the show state should be toggled (bool),
# "saveError": If an error means this value shoudl not be saled (bool),
# "checkFunctions": List of functions to run that control the toggle state (function),
# "updateFrames": Set of frames to update with status messages
# }
#],
# }
# ]
# }
self.module = self.ensure_default(module, default = self)
self.database = db_config.build(default_filePath = filePath, default_section = section, **configKwargs)
self.default_updateFrames = set()
self.loadSettings()
#User Functions
def applyUserFunctions(self):
self.module.getSetting = self.getSetting
self.module.setSetting = self.setSetting
self.module.addSettingWidget = self.addSettingWidget
self.module.addToggleWidget = self.addToggleWidget
def setBuilding(self, state):
self.building = state
@contextlib.contextmanager
def isBuilding(self):
current_building = self.building
self.setBuilding(True)
yield
self.setBuilding(current_building)
def finished(self):
self.setBuilding(False)
#Setting Functions
def loadSettings(self):
"""Returns the requested setting.
Example Input: loadSettings()
"""
for variable, value in self.database.get(forceSetting = True).items():
setattr(self.module, variable, value)
def getSetting(self, variable):
"""Returns the requested setting.
Example Input: getSetting("offsets")
"""
return self.database.get(variable)
def setSetting(self, variable, value, *, refreshGUI = True):
"""Changes the provided setting.
Example Input: setSetting("offsets", 3)
"""
self.log_warning(f"Changing Value", variable = variable, value = value)
self.database.set(variable, value, save = True)
if (refreshGUI):
self.setGuiParameter(variable, value)
setattr(self.module, variable, value)
#Widget Functions
def addSettingWidget(self, variable, myWidget, *, getter = None, setter = None,
displayOnly = False, updateGUI = True, checkAnother = None, check_onUpdate = None,
autoSave = True, autoSave_check = NULL, autoSave_getterArgs = None, autoSave_getterKwargs = None,
toggleWidget = None, checkFunction = None, toggle_enable = NULL, toggle_show = NULL, toggle_saveError = NULL):
"""Connects the *myWidget* to *variable*.
Returns the index for *myWidget* in the list of widgets for *variable*.
variable (str) - What setting variable this widget connects to
myWidget (guiWidget) - The widget that modifies this setting
getter (function) - What function to run to get the value from *myWidget*
setter (function) - What function to run to set the value for *myWidget*
displayOnly (bool) - If the setting can be modified
updateGUI (bool) - If *myWidget* should be updated with the new setting value
autoSave (bool) - If the setting value should be saved after it is edited
toggleWidget (guiWidget) - A widget to toggle the state of based on *checkFunction*
checkFunction (function) - A function that controls the state of *toggleWidget*
Example Input: addSettingWidget("current_quantity", myWidget)
Example Input: addSettingWidget("current_quantity", myWidget, checkAnother = "current_job")
Example Input: addSettingWidget("current_quantity", myWidget, checkAnother = ["current_job", "current_date"])
Example Input: addSettingWidget("current_quantity", myWidget, autoSave_check = True, displayOnly = True)
Example Input: addSettingWidget("current_date_use_override", myWidget, toggleWidget = "current_date", toggle_saveError = True, checkFunction = lambda value: not value)
"""
if (variable in self.databound_widgets):
self.log_warning(f"Overwriting databound widget {variable}")
else:
self.databound_widgets[variable] = []
widgetCatalogue = {
"widget": myWidget,
"displayOnly": displayOnly,
"getter": (myWidget.getValue, getter)[getter is not None],
"setter": (myWidget.setValue, setter)[setter is not None],
"toggle": [],
"variable": variable,
"checkAnother": checkAnother,
}
myWidget._databoundSettingCatalogue = widgetCatalogue
self.databound_widgets[variable].append(widgetCatalogue)
index = len(self.databound_widgets[variable]) - 1
if (autoSave):
self.autoSave(variable = variable, widgetCatalogue = widgetCatalogue, check = autoSave_check, save = not displayOnly,
getterArgs = autoSave_getterArgs, getterKwargs = autoSave_getterKwargs)
if (toggleWidget is not None):
if (checkFunction is not None):
self.addToggleWidget(variable = variable, toggleWidget = toggleWidget, checkFunction = checkFunction,
widgetCatalogue = widgetCatalogue, enable = toggle_enable, show = toggle_show, saveError = toggle_saveError)
else:
self.log_error(f"Must provide 'checkFunction' along with 'toggleWidget' to add a toggle widget for {label}")
else:
if (checkFunction is not None):
self.log_error(f"Must provide 'toggleWidget' along with 'checkFunction' to add a toggle widget for {label}")
if (check_onUpdate and not autoSave_check):
myWidget.setFunction_click(self.onCheckAll, myFunctionKwargs = { "variable": variable })
if (checkAnother):
myWidget.setFunction_click(self.onCheckAll, myFunctionKwargs = { "variable": checkAnother })
if (updateGUI):
self.updateGuiSettings(variable = variable, widgetCatalogue = widgetCatalogue)
return index
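# Illustrative sketch (hypothetical GUI_Maker widgets and names): a check function receives the
# widget's current value and returns None when it is acceptable, or an error string that
# _checkSetting() pushes to the status bar of the registered update frames.
#
#     def check_quantity(value):
#         if (not value):
#             return "A quantity is required"
#
#     loader.addSettingWidget("current_quantity", widget_quantity,
#         toggleWidget = widget_submitButton, checkFunction = check_quantity, toggle_saveError = True)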
def _yieldWidgetCatalogue(self, variable = None, index = None, *, widgetCatalogue = None, exclude = ()):
"""Yields the widget catalogue(s) for *variable*.
variable (str) - What setting to yield the widget catalogue(s) for
~ If None: Will yield for all variables
~ If List: Will yield for all variables in the list
index (int) - Which widget yield the widget catalogue(s) for (in order added)
~ If None: Will yield for all widgets for *variable*
~ If List: Will yield for all widgets for *variable* in the list
widgetCatalogue (dict) - If provided, will yield this instead of doing the normal yield routine
Example Input: _yieldWidgetCatalogue()
Example Input: _yieldWidgetCatalogue("current_quantity")
Example Input: _yieldWidgetCatalogue("current_quantity", 2)
Example Input: _yieldWidgetCatalogue(["current_quantity", "current_file"])
Example Input: _yieldWidgetCatalogue(widgetCatalogue = widgetCatalogue)
"""
def yieldVariable():
nonlocal variable
if (variable is None):
for item in self.databound_widgets.keys():
yield item
return
for item in self.ensure_container(variable):
if (item not in self.databound_widgets):
self.log_error(f"'{item}' not found in databound widgets; cannot update GUI")
continue
yield item
def yieldIndex(_variable):
nonlocal index
numberOfWidgets = len(self.databound_widgets[_variable])
if (index is None):
for item in range(numberOfWidgets):
yield item
return
for item in self.ensure_container(index):
if (item >= numberOfWidgets):
self.log_error(f"There are less than '{item}' databound widgets for '{_variable}'; cannot update GUI")
continue
yield item
######################################################
if (widgetCatalogue != None):
yield widgetCatalogue
return
for _variable in yieldVariable():
if (_variable in exclude):
continue
for _index in yieldIndex(_variable):
yield self.databound_widgets[_variable][_index]
def yieldSettingsWidget(self, variable = None, index = None):
"""Yields the settings widget for *variable* at the insert position of *index*
Example Input: yieldSettingsWidget("current_quantity")
"""
for _widgetCatalogue in self._yieldWidgetCatalogue(variable = variable, index = index):
yield _widgetCatalogue["widget"]
def autoSave(self, variable = None, index = None, *, widgetCatalogue = None,
check = True, save = True, getterArgs = None, getterKwargs = None):
"""Sets up *variable* to automatically save after it is interacted with.
variable (str) - What setting to automatically save for
index (int) - Which widget(s) to automatically save when interacted with (in order added)
check (bool) - If check functions should all pass before it is allowed to save
Example Input: autoSave()
Example Input: autoSave("current_quantity")
"""
for _widgetCatalogue in self._yieldWidgetCatalogue(variable = variable, index = index, widgetCatalogue = widgetCatalogue):
myWidget = _widgetCatalogue["widget"]
myWidget.setFunction_click(myFunction = self.onChangeSetting, myFunctionKwargs = {
"variable": self.ensure_default(variable, default = _widgetCatalogue["variable"]), "myWidget": myWidget,
"check": check, "save": save, "getterArgs": getterArgs, "getterKwargs": getterKwargs,
})
def addToggleWidget(self, toggleWidget, checkFunction, variable = None, index = None, *, widgetCatalogue = None,
enable = True, show = False, saveError = False, updateFrame = None):
"""Allows the widget(s) for *variable* to toggle *toggleWidget* based on the results of *checkFunction*.
toggleWidget (guiWidget) - The widget to toggle states on
checkFunction (function) - A function to run to see if the state should be toggled
~ If returns Falsey: Will make the toggle state positive
~ If returns Truthy: Will make the toggle state negative
~ If returns a string: Will also display the string as a status message for *updateFrame*
variable (str) - What setting to add a toggle widget to
index (int) - Which widget(s) to connect the toggle widget
saveError (bool) - If an error state means the setting should not be saved
enable (bool) - If the enable state of *toggleWidget* should be toggled
show (bool) - If the show state of *toggleWidget* should be toggled
updateFrame (guiWindow) - The window(s) to update status text on
Example Input: addToggleWidget(self.widget_submitButton, self.check_file, variable = "current_file")
Example Input: addToggleWidget(self.widget_submitButton, self.check_file, variable = "current_file", saveError = True)
Example Input: addToggleWidget(self.widget_submitButton, self.check_file, variable = "current_file", enable = False, show = True)
"""
def yieldToggleWidget(_widgetCatalogue):
nonlocal toggleWidget
if (isinstance(toggleWidget, str)):
yielded = False
for catalogue in self._yieldWidgetCatalogue(variable = toggleWidget, index = None):
yield catalogue["widget"]
yielded = True
if (not yielded):
self.log_error(f"Could not find toggle widget for '{toggleWidget}'")
return
yield toggleWidget
#################################
show = self.ensure_default(show, False, defaultFlag = NULL)
enable = self.ensure_default(enable, True, defaultFlag = NULL)
saveError = self.ensure_default(saveError, False, defaultFlag = NULL)
_checkFunctions = self.ensure_container(checkFunction)
_updateFrames = None if (updateFrame is None) else self.ensure_container(updateFrame)
for _widgetCatalogue in self._yieldWidgetCatalogue(variable = variable, index = index, widgetCatalogue = widgetCatalogue):
for _toggleWidget in yieldToggleWidget(_widgetCatalogue):
_widgetCatalogue["toggle"].append({
"toggleWidget": _toggleWidget,
"saveError": saveError,
"enable": enable,
"show": show,
"checkFunctions": _checkFunctions,
"updateFrames": _updateFrames,
})
def _getWidgetValue(self, widgetCatalogue, *, getterArgs = None, getterKwargs = None):
"""Returns the value of the widget in the widget catalogue.
Example Input: _getWidgetValue(widgetCatalogue)
"""
return self.runMyFunction(myFunction = widgetCatalogue["getter"], myFunctionArgs = getterArgs, myFunctionKwargs = getterKwargs)
def changeSetting(self, variable, myWidget, *, check = False, save = False, checkBuilding = True,
getterArgs = None, getterKwargs = None):
"""An event for when a setting widget is modified.
variable (str) - Which variable to change
myWidget (guiWidget) - The widget to get the value from
"""
if (checkBuilding and self.building):
return
value = self._getWidgetValue(myWidget._databoundSettingCatalogue, getterArgs = getterArgs, getterKwargs = getterKwargs)
if (check and (not self._checkSetting(variable = variable, value = value, widgetCatalogue = myWidget._databoundSettingCatalogue))):
return
if (save):
self.setSetting(variable, value, refreshGUI = False)
else:
setattr(self.module, variable, value)
@wrap_skipEvent()
def onChangeSetting(self, event, *args, **kwargs):
"""A wxEvent version of *changeSetting*."""
self.changeSetting(*args, **kwargs)
def resetSettings(self):
"""Changes the all settings to their default values."""
self.log_warning(f"Resetting Values")
with self.isBuilding():
self.database.set(self.database.get(section = "DEFAULT", forceSetting = True), section = "parameters", save = True)
self.loadSettings()
self.updateGuiSettings()
@wrap_skipEvent()
def onResetSettings(self, event, *args, **kwargs):
"""A wxEvent version of *resetSettings*."""
self.resetSettings(*args, **kwargs)
#GUI Functions
def updateGuiSettings(self, variable = None, index = None, *, widgetCatalogue = None):
"""Updates the widget(s) for *variable*.
variable (str) - What setting to update widgets for
index (int) - Which widget to update (in order added)
Example Input: updateGuiSettings()
Example Input: updateGuiSettings("current_quantity")
"""
for _widgetCatalogue in self._yieldWidgetCatalogue(variable = variable, index = index, widgetCatalogue = widgetCatalogue):
_widgetCatalogue["setter"](getattr(self.module, variable or _widgetCatalogue["variable"]))
@wrap_skipEvent()
def onUpdateGuiSettings(self, event, *args, **kwargs):
"""A wxEvent version of *updateGuiSettings*."""
self.updateGuiSettings(*args, **kwargs)
def setDefaultUpdateWindow(self, myFrame = None):
"""Adds *myFrame* to the list of default frames to push status messages to when check functions are run.
myFrame (guiWindow) - Which window to update the status message of
- If list: All windows in the list will be updated
- If None: No window will be updated
Example Input: setDefaultUpdateWindow(self.frame_main)
Example Input: setDefaultUpdateWindow([self.frame_main, self.frame_html])
"""
self.default_updateFrames.update(self.ensure_container(myFrame))
def _setStatusText(self, myFrame, text = None):
"""Updates the status text for *myFrame*
myFrame (guiWindow) - Which window to update the status message of
- If list: All windows in the list will be updated
- If None: Will update the windows in *default_updateFrames*
text (str) - What the status text will say
Example Input: _setStatusText(myFrame, text)
"""
if (myFrame is None):
_updateFrames = self.default_updateFrames
else:
_updateFrames = self.ensure_container(myFrame)
for myFrame in _updateFrames:
myFrame.setStatusText(text)
def yieldCheckFunctionResult(self, value, widgetCatalogue):
for toggleCatalogue in widgetCatalogue["toggle"]:
for checkFunction in toggleCatalogue["checkFunctions"]:
yield checkFunction(value), toggleCatalogue
def _checkSetting(self, variable, value, widgetCatalogue, *, updateGUI = True):
windowCatalogue = {} # {updateFrame (guiWindow): Error Message (str)}
toggleStateCatalogue = {} # {toggleWidget (guiWidget): {"state": If the state is positive or negative (bool), "enable": If the enable state can be changed (bool), "show": If the shown state can be changed (bool)}}
noError = True
for errorMessage, toggleCatalogue in self.yieldCheckFunctionResult(value, widgetCatalogue):
toggleWidget = toggleCatalogue["toggleWidget"]
if (not errorMessage):
# Do not overwrite a negative
if (toggleWidget not in toggleStateCatalogue):
toggleStateCatalogue[toggleWidget] = {"state": True, "enable": toggleCatalogue["enable"], "show": toggleCatalogue["show"]}
if (toggleCatalogue["updateFrames"] not in windowCatalogue):
windowCatalogue[toggleCatalogue["updateFrames"]] = None
continue
#Account for saving on an error being ok
noError = noError and toggleCatalogue["saveError"]
# Only show the first error found for their respective window(s)
if ((toggleCatalogue["updateFrames"] not in windowCatalogue) or (windowCatalogue[toggleCatalogue["updateFrames"]] is None)):
windowCatalogue[toggleCatalogue["updateFrames"]] = errorMessage
# Ensure a negative
toggleStateCatalogue[toggleWidget] = {"state": False, "enable": toggleCatalogue["enable"], "show": toggleCatalogue["show"]}
if (updateGUI):
for toggleWidget, stateCatalogue in toggleStateCatalogue.items():
if (stateCatalogue["enable"]):
toggleWidget.setEnable(stateCatalogue["state"])
if (stateCatalogue["show"]):
toggleWidget.setShow(stateCatalogue["state"])
for myFrame, text in windowCatalogue.items():
if (isinstance(text, str)):
self._setStatusText(myFrame, text)
return noError
def checkAll(self, exclude = (), *, updateGUI = True, getterArgs = None, getterKwargs = None, variable = None, index = None):
"""Runs all check functions.
Example Input: checkAll()
Example Input: checkAll(variable = "current_job")
"""
noError = True
for _widgetCatalogue in self._yieldWidgetCatalogue(exclude = exclude, variable = variable, index = index):
_variable = _widgetCatalogue["variable"]
value = self._getWidgetValue(_widgetCatalogue, getterArgs = getterArgs, getterKwargs = getterKwargs)
noError = noError and self._checkSetting(variable = _variable, value = value, widgetCatalogue = _widgetCatalogue)
if (noError and updateGUI):
updateFrameList = set()
for _widgetCatalogue in self._yieldWidgetCatalogue(exclude = exclude, variable = variable, index = index):
if (_widgetCatalogue["toggle"]):
for item in _widgetCatalogue["toggle"]:
updateFrameList.add(item["updateFrames"])
else:
updateFrameList.add(None)
for myFrame in updateFrameList:
self._setStatusText(myFrame)
return noError
@wrap_skipEvent()
def onCheckAll(self, event, *args, **kwargs):
"""A wxEvent version of *checkAll*."""
self.checkAll(*args, **kwargs)
```
{
"source": "JoshMayberry/VMG_Wegsite",
"score": 3
}
#### File: VMG_Wegsite/Treehouse/helper_functions.py
```python
import io
import os
import sys
import json
import time
import logging
import datetime
import requests
import traceback
import itertools
import collections
import configparser
import numpy
import pandas
import dropbox
import psycopg2
import psycopg2.extras
from webflowpy.Webflow import Webflow
import common
import storage_util
# pip install
# pandas
# dropbox
# psycopg2
# azure-storage-blob
debugging = False
logger = logging.getLogger(__name__)
def logger_debug():
""" Changes the log level to debug.
See: https://docs.python.org/3/howto/logging.html
Example Input: logger_debug()
"""
global debugging
debugging = True # Use this to short-circuit expensive f-strings
logging.basicConfig(level=logging.DEBUG)
def logger_info():
""" Changes the log level to info.
See: https://docs.python.org/3/howto/logging.html
Example Input: logger_info()
"""
logging.basicConfig(level=logging.INFO)
logger_timers = collections.defaultdict(dict)
def logger_timer(label=None):
""" A decorator that records how long a function took to execute.
If the logger level is not info or debug, will not do anything.
See: https://docs.python.org/3/howto/logging.html#optimization
label (str) - What the timer is called
- If None: Will assign a unique number as the label for this timer
EXAMPLE USE
@logger_timer()
def longFunction():
pass
EXAMPLE USE
@logger_timer("lorem")
def longFunction():
pass
"""
if (not logger.isEnabledFor(logging.INFO)):
def decorator(myFunction):
return myFunction
return decorator
def decorator(myFunction):
def wrapper(*args, **kwargs):
nonlocal label
if (label is None):
label = len(logger_timers)
while label in logger_timers:
label += 1
catalogue = logger_timers[label]
logging.info(f"Starting the '{label}' timer")
catalogue["start"] = time.perf_counter()
answer = myFunction(*args, **kwargs)
logging.info(f"Ending the '{label}' timer")
catalogue["end"] = time.perf_counter()
return answer
return wrapper
return decorator
def logger_timer_print(label=None):
""" Prints the times for timers.
label (str) - Which timer to print the time for
- If None: Will print for all timers
- If tuple: WIll print for each timer in the list
Example Input: logger_timer_print()
Example Input: logger_timer_print("lorem")
"""
for _label in common.ensure_container(label, convertNone=False, returnForNone=lambda: logger_timers.keys()):
catalogue = logger_timers[_label]
print(f"{_label}: {catalogue.get('end', time.perf_counter()) - catalogue['start']:.2f}")
def tryExcept(myFunction):
""" A decorator the surrounds the inner function with a try/except
EXAMPLE USE
@tryExcept
def fragileFunction():
pass
"""
def wrapper(*args, **kwargs):
try:
return myFunction(*args, **kwargs)
except Exception as error:
traceback.print_exception(type(error), error, error.__traceback__)
return wrapper
parser = None
def config(key=None, section="postgresql", *, filename="database.ini", useCached=True, defaultValue=None, **kwargs):
""" Reads value(s) from the config file.
section (str) - Which ini section to read from
key (str) - Which key to return from the ini file
- If None: Returns a dictionary of all items in *section*
filename (str) - Where the ini file is located
useCached (bool) - If the config file loaded last time should be reused
defaultValue (any) - What should be used if *key* is not in *section*
Example Input: config()
Example Input: config(section="ipsum")
Example Input: config(filename="lorem.ini")
Example Input: config(key="token", section="dropbox")
"""
global parser
if (not useCached or not parser):
parser = configparser.ConfigParser()
parser.read(filename)
if not parser.has_section(section):
raise ValueError(f"Section '{section}' not found in the '{filename}' file")
if (key):
return parser[section].get(key, defaultValue)
return {key: value for (key, value) in parser.items(section)}
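# Illustrative sketch of the ini layout config() expects; the [postgresql] keys are passed
# straight to psycopg2.connect() and the other sections mirror the config() calls in this
# module (all values below are placeholders, not real credentials):
#
#     [postgresql]
#     host=localhost
#     dbname=treehouse
#     user=postgres
#     password=<PASSWORD>
#
#     [dropbox]
#     token=<TOKEN>
#
#     [blob]
#     account_key=<KEY>
#
#     [webflow]
#     token=<TOKEN>
#
#     [formsite]
#     user=<USER>
#     server=<SERVER>
#     bearer=<BEARER>
#     cookie=<COOKIE>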
def _ma_getToken(login_user="VinApi", login_password="<PASSWORD>", **kwargs):
""" Function to return MA access token.
See: https://docs.python-requests.org/en/v1.0.0/api/#main-interface
login_user (str) - The username to use for logging in
login_password (str) - The password to use for logging in
Example Input: _ma_getToken()
Example Input: _ma_getToken("Lorem", "Ipsum")
"""
logging.info("Getting MA token...")
response = requests.request("POST", "https://api.manageamerica.com/api/account/signin",
headers = {"Content-Type": "application/json"},
data = json.dumps({"login": login_user, "password": login_password}),
)
token = response.json()["token"]
logging.debug(f"ma_token: '{token}'")
return token
def ma_getData(url, *, token=None, alias=None, modifyData=None, filterData=None, customReport=False, **kwargs):
""" Returns data from Manage America.
url (str or tuple) - The URL to pull the data from
- If tuple: Will get the data from each URL in the list
alias (dict of str) - Key names to replace in the data source if they exist
modifyData (function) - A function which modifies the data before it is returned
filterData (function) - A function which filters the data before it is modified or returned
customReport (bool) - If the result should be parsed as a manage america 'adds report'
Example Input: ma_getData("https://n6.manageamerica.com/api/property/?companyId=233")
Example Input: ma_getData("https://n6.manageamerica.com/api/property/?companyId=233", modifyData=lambda data: [*data, {"lorem": "ipsum"}])
Example Input: ma_getData("https://n6.manageamerica.com/api/property/?companyId=233", filterData=lambda item: row.get("type") == "Detail")
Example Input: ma_getData(["https://www.lorem.com", "https://www.ipsum.com"])
Example Input: ma_getData("https://n6.manageamerica.com/api/addsReport/v1/runReport?Company_Id=233&Report_Id=4553", customReport=True)
Example Input: ma_getData("https://n6.manageamerica.com/api/property/?companyId=233", alias={"Old Name 1": "new_name_1", "Old Name 2": "new_name_2"})
"""
token = token or _ma_getToken(**kwargs)
data = []
urlList = common.ensure_container(url)
# urlList = common.ensure_container(url)[:1] #FOR DEBUGGING
for (index, _url) in enumerate(urlList):
logging.info(f"Getting data for url {index + 1}: '{_url}'")
response = requests.request("GET", _url,
headers = {"Authorization": f"Bearer {token}"},
data = {},
)
try:
answer = response.json()
if (isinstance(answer, dict)):
if (answer.get("message", None)):
raise ValueError(answer)
data.append(answer)
else:
data.extend(answer or ())
except requests.exceptions.JSONDecodeError as error:
print(f"url: '{_url}'")
raise error
# logging.debug(debugging and f"ma_response: '{data}'")
if (not len(data)):
return
if (customReport):
logging.info("Parsing custom report data...")
columns = tuple(item["name"] for item in data[0]["columns"])
data = tuple({key: value for (key, value) in itertools.zip_longest(columns, item["data"])} for item in data[0]["rows"] if (item["type"] != "Total"))
if (not len(data)):
return
if (alias):
logging.info("Applying alias to data...")
data = tuple({alias.get(key, key): value for (key, value) in catalogue.items()} for catalogue in data)
if (filterData):
logging.info("Filtering data...")
for myFunction in common.ensure_container(filterData):
data = tuple(filter(myFunction, data))
if (modifyData):
logging.info("Modifying data...")
for myFunction in common.ensure_container(modifyData):
data = myFunction(data)
return data
def fs_getData(form, *, view=None, server=None, user=None, bearer=None, cookie=None):
""" Returns data from Formsite.
form (str or tuple) - The form code of the form to return data for
- If tuple: Will get the data from each form code in the list
view (str) - Which result view to return data for
append (bool) - If the results should be appended to what is already in the file or not
server (str) - Which formsite server to use
user (str) - Which formsite user account to use
bearer (str) - The bearer token to use
cookie (str) - The cookies token to send
Example Input: fs_getData("wjsrave6n6")
Example Input: fs_getData("rf1gmwaueh", view=101)
"""
user = user or config("user", "formsite")
server = server or config("server", "formsite")
bearer = bearer or config("bearer", "formsite")
cookie = cookie or config("cookie", "formsite")
data = []
headers = {
"Authorization": bearer,
"Cookie": cookie,
}
for _form in common.ensure_container(form):
url_base = f"https://{server}.formsite.com/api/v2/{user}/forms/{_form}/"
# Get Pipe Labels
response = requests.request("GET", f"{url_base}/items", headers=headers)
pipe_labels = {item["id"]: item["label"] for item in response.json()["items"]}
# Get Pipe Values
response = requests.request("GET", f"{url_base}/results", headers=headers)
last_page = response.headers["Pagination-Page-Last"]
for page in range(1, int(last_page) + 1):
_url = f"{url_base}/results?page={page}"
if (view):
_url += f"&results_view={view}"
logging.info(f"Getting data for url {page} of {last_page}: '{_url}'")
response = requests.request("GET", _url, headers=headers)
for row in response.json()["results"]:
if (("date_start" not in row) or ("date_finish" not in row)):
continue
pipe_values = {}
catalogue = {
"id": row["id"],
"date_start": datetime.datetime.strptime(row["date_start"], "%Y-%m-%dT%H:%M:%SZ"),
"date_finish": datetime.datetime.strptime(row["date_finish"], "%Y-%m-%dT%H:%M:%SZ"),
"date_update": datetime.datetime.strptime(row["date_update"], "%Y-%m-%dT%H:%M:%SZ"),
"pipe_labels": pipe_labels,
"pipe_values": pipe_values,
}
for item in row["items"]:
if "values" not in item:
pipe_values[item["id"]] = item["value"]
continue
match len(item["values"]):
case 0:
pipe_values[item["id"]] = None
case 1:
pipe_values[item["id"]] = item["values"][0]["value"]
case _:
raise NotImplementedError(f"Multiple formsite values; {item['values']}")
catalogue["pipe_combined"] = json.dumps({pipe_labels[key]: value for (key, value) in pipe_values.items()})
catalogue["pipe_labels"] = json.dumps(catalogue["pipe_labels"])
catalogue["pipe_values"] = json.dumps(catalogue["pipe_values"])
data.append(catalogue)
return data
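# Illustrative sketch (field names taken from the loop above): each row returned by fs_getData
# looks roughly like the following, where the pipe_* fields are JSON strings:
#
#     {
#         "id": "123456789",
#         "date_start": datetime.datetime(2023, 1, 1, 12, 0, 0),
#         "date_finish": datetime.datetime(2023, 1, 1, 12, 5, 0),
#         "date_update": datetime.datetime(2023, 1, 1, 12, 5, 0),
#         "pipe_labels": '{"100": "First Name"}',
#         "pipe_values": '{"100": "Lorem"}',
#         "pipe_combined": '{"First Name": "Lorem"}',
#     }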
def _posgres_renameKeys(iterable):
""" A recursive function to convert keys to lowercase.
Otherwise, json keys won't match postgresql column names.
iterable (list or dict) - What to iterate over
Example Input: _posgres_renameKeys([{"Lorem": 1, "IPSUM": 2, "dolor": 3}])
"""
if isinstance(iterable, dict):
new_object = {}
for (key, value) in iterable.items():
new_object[key.lower()] = _posgres_renameKeys(value) if isinstance(value, (dict, list, tuple)) else value
return new_object
if isinstance(iterable, (list, tuple)):
return [_posgres_renameKeys(item) for item in iterable]
return iterable
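# For example (illustrative, not from the original source):
#     _posgres_renameKeys([{"Lorem": 1, "IPSUM": {"Dolor": 2}}])
#     -> [{"lorem": 1, "ipsum": {"dolor": 2}}]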
def posgres_insert(data, table, *, method="upsert", insert_method="single", backup=None, lowerNames=True, typeCatalogue=None, **kwargs):
""" Adds data to a posgres database.
See: https://www.psycopg.org/docs/usage.html#query-parameters
Use: http://www.postgresqltutorial.com/postgresql-python/connect/
data (tuple of dict) - What to send to the database
table (str) - Which table to send the data to
method (str) - How to handle sending the data
- upsert: Update existing data in the db, otherwise insert a new row
- drop: Drop all rows in the table and insert new rows
- truncate: Cleanly drop all rows in the table and insert new rows (See: https://stackoverflow.com/questions/11419536/postgresql-truncation-speed/11423886#11423886)
insert_method (str) - How to handle inserting things into the database
- json: Pass in a JSON string with all the data (Does not allow non-serializable inputs such as datetime objects)
- separate: Do an individual insert for each row (Much slower)
- single: Do a single insert statement for every 900 rows
backup (dict or str) - How to backup what is inserted
- kind (required): Which backup destination to use
- dropbox: Send to dropbox
- blob: Send to blob storage
- other keys: kwargs to send
- If str: Assumed to be *backup.kind*
lowerNames (bool) - If object keys should be lower-cased
typeCatalogue (dict) - What type specific columns need to be; where the key is the column name and the value is one of the following strings:
- json: The data should be a JSON string (will fail if the column's value contains non-serializable values)
Example Input: posgres_insert([{"lorem": "ipsum"}], "property")
Example Input: posgres_insert([{"Lorem": "ipsum"}], "property", lowerNames=True)
Example Input: posgres_insert([{"lorem": "ipsum"}], "property", backup="dropbox")
Example Input: posgres_insert([{"lorem": "ipsum"}], "property", backup={"kind": "dropbox", "folder": "rps"})
Example Input: posgres_insert([{"lorem": datetime.datetime.now()}], "property", insert_method="separate")
Example Input: posgres_insert(frame, "property")
Example Input: posgres_insert([{"lorem": "ipsum"}], "property", method="drop")
Example Input: posgres_insert([{"lorem": {"ipsum": 1}}], "property", typeCatalogue={"lorem": "json"})
"""
if (isinstance(data, pandas.DataFrame)):
# See: https://pandas.pydata.org/pandas-docs/version/0.17.0/generated/pandas.DataFrame.to_dict.html#pandas.DataFrame.to_dict
data = data.replace({numpy.nan: None}).to_dict("records")
if (not len(data)):
logging.info(f"No data to insert into '{table}'")
return False
if (lowerNames):
data = _posgres_renameKeys(data)
if (typeCatalogue):
for (key, value) in typeCatalogue.items():
for row in data:
if (key not in row):
continue
match value:
case "json":
if (isinstance(row[key], str)):
continue
row[key] = json.dumps(row[key])
case _:
raise KeyError(f"Unknown *typeCatalogue[{key}]* '{value}'")
queries = []
no_update = (method != "upsert")
if (no_update):
match method:
case "drop":
queries.append([f"DELETE FROM {table}", ()])
case "truncate":
queries.append([f"TRUNCATE TABLE {table} RESTART IDENTITY", ()])
case _:
raise KeyError(f"Unknown *method* '{method}'")
match insert_method:
case "json":
queries.append([
f"INSERT INTO {table} SELECT p.* FROM jsonb_populate_recordset(NULL::{table}, %s) as p" +
("" if no_update else f" ON CONFLICT ON CONSTRAINT {table}_pkey DO UPDATE SET {', '.join(f'{key} = EXCLUDED.{key}' for key in data[0].keys())}"),
(json.dumps(data),)
])
case "separate":
for row in data:
keyList = tuple(row.keys())
queries.append([
f"INSERT INTO {table} ({', '.join(keyList)}) VALUES ({', '.join(f'%({key})s' for key in keyList)})" +
("" if no_update else f" ON CONFLICT ON CONSTRAINT {table}_pkey DO UPDATE SET {', '.join(f'{key} = EXCLUDED.{key}' for key in keyList)}"),
row
])
case "single":
keyList = tuple(data[0].keys())
for chunk in (data[i:i+900] for i in range(0, len(data), 900)):
valueList = []
valueCatalogue = {}
for (i, row) in enumerate(chunk):
valueList.append(f"({', '.join(f'%({key}_{i})s' for key in keyList)})")
valueCatalogue.update({f"{key}_{i}": value for (key, value) in row.items()})
queries.append([
f"INSERT INTO {table} ({', '.join(keyList)}) VALUES {', '.join(valueList)}" +
("" if no_update else f" ON CONFLICT ON CONSTRAINT {table}_pkey DO UPDATE SET {', '.join(f'{key} = EXCLUDED.{key}' for key in keyList)}"),
valueCatalogue
])
case _:
raise KeyError(f"Unknown *insert_method* '{insert_method}'")
# See: https://www.psycopg.org/docs/connection.html
logging.info("Making posgres connection...")
connection = psycopg2.connect(**config(**kwargs))
with connection:
connection.autocommit = True
with connection.cursor() as cursor:
logging.info(f"Sending {len(queries)} queries...")
for (query_sql, query_args) in queries:
logging.debug(debugging and f"query_sql: '{query_sql}'")
logging.debug(debugging and f"query_args: '{query_args}'")
cursor.execute(query_sql, query_args or ())
logging.info("Closing posgres connection...")
connection.close()
if (backup):
backup = common.ensure_dict(backup, "kind")
kind = backup.get("kind", None)
if (not backup.get("filename", None)):
# See: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
backup["filename"] = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + ".csv"
match kind:
case "dropbox":
return dropbox_insert(data, folder=table, input_type="csv", **backup)
case "blob":
return blobStorage_insert(data, folder=table, input_type="csv", **backup)
case None:
raise ValueError("Required key missing: *backup.kind*")
case _:
raise KeyError(f"Unknown *backup.kind* '{kind}'")
return True
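# Illustrative sketch (hypothetical table and columns): with insert_method="single" and
# method="upsert", the query built above for a 2-row chunk of a table "property" with the
# columns (id, name) would look roughly like:
#
#     INSERT INTO property (id, name) VALUES (%(id_0)s, %(name_0)s), (%(id_1)s, %(name_1)s)
#     ON CONFLICT ON CONSTRAINT property_pkey DO UPDATE SET id = EXCLUDED.id, name = EXCLUDED.name
#
# with query_args = {"id_0": ..., "name_0": ..., "id_1": ..., "name_1": ...}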
def posgres_raw(query_sql, query_args=None, *, as_dict=True, alias=None, filterData=None, modifyData=None, **kwargs):
""" Returns the answer to a raw sql statement to ther database.
table (str) - Which table to get data from
Example Input: posgres_select("SELECT * FROM property")
Example Input: posgres_select("SELECT * FROM property WHERE id = %s", (1,))
Example Input: posgres_select("SELECT id FROM property", as_dict=False)
"""
# See: https://www.psycopg.org/docs/connection.html
logging.info("Making posgres connection...")
connection = psycopg2.connect(**config(**kwargs))
data = []
with connection:
cursor = connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor) if as_dict else connection.cursor()
with cursor:
logging.info("Sending raw query...")
cursor.execute(query_sql, query_args or ())
data = [dict(catalogue) for catalogue in cursor.fetchall()] if as_dict else cursor.fetchall()
logging.info(f"Recieved '{len(data or ())}' results")
logging.info("Closing posgres connection...")
connection.close()
if (alias):
logging.info("Applying alias to data...")
data = tuple({alias.get(key, key): value for (key, value) in catalogue.items()} for catalogue in data)
if (filterData):
logging.info("Filtering data...")
for myFunction in common.ensure_container(filterData):
data = tuple(filter(myFunction, data))
if (modifyData):
logging.info("Modifying data...")
for myFunction in common.ensure_container(modifyData):
data = myFunction(data)
return data
def _blobStorage_getConnection(container, *, account_name="rpbireporting", account_key=None, **kwargs):
""" Returns a blob storage connection.
account_name (str) - The name of the account to connect to
account_key (str)
Example Input: _blobStorage_getConnection("treehouse")
Example Input: _blobStorage_getConnection("ma-extract", account_name="birdeye01reporting", account_key="/<KEY>)
"""
account_key = account_key or config("account_key", "blob")
logging.info(f"Making blob storage connection to '{container}'...")
connection_string = ";".join([
"DefaultEndpointsProtocol=https",
f"AccountName={account_name}",
f"AccountKey={account_key}",
f"BlobEndpoint=https://{account_name}.blob.core.windows.net/",
f"QueueEndpoint=https://{account_name}.queue.core.windows.net/",
f"TableEndpoint=https://{account_name}.table.core.windows.net/",
f"FileEndpoint=https://{account_name}.file.core.windows.net/;",
])
logging.debug(f"Connection string: '{connection_string}'")
return storage_util.DirectoryClient(connection_string, container)
def _yield_fileOutput(data, folder=None, filename=None, *, input_type="csv", walk_allow=("csv",), **kwargs):
""" A generator that yields file handles and their intended destinations based on the input criteria.
data (any) - What to send to the implemented storage
folder (str) - What folder path of the container to store the file(s) in
- If None: Will put the file in the root directory
filename (str) - What file name to use for the file
- If None: Will try coming up with a file name
input_type (str) - How to handle parsing the input data
- raw: Just send it as recieved
- json: Serialize *data* with json.dumps and send the resulting string
- csv: Make it a csv file
- file: Will use *data* as a filepath to look it up from disk; if there is no file extension it will walk that directory and send all files contained there
walk_allow (tuple) - What file extensions to allow from walking the directory for *input_type*
Example Input: _yield_fileOutput([{Lorem: "ipsum"}])
Example Input: _yield_fileOutput([{Lorem: "ipsum"}], folder="rps")
Example Input: _yield_fileOutput({Lorem: "ipsum"}, input_type="json")
Example Input: _yield_fileOutput("C:/lorem/ipsum", input_type="file")
Example Input: _yield_fileOutput("C:/lorem/ipsum", input_type="file", walk_allow=("csv", "xlsx"))
Example Input: _yield_fileOutput(open("lorem.txt", "r"), filename="lorem.txt", input_type="raw")
"""
data_isPandas = isinstance(data, pandas.DataFrame)
if (data_isPandas):
input_type = "csv"
match input_type:
case "file":
if (os.path.splitext(data)[1]):
destination = os.path.join(folder, os.path.basename(data))
logging.info(f"Send '{data}' to '{destination}'")
with open(data, "rb") as handle_file:
yield (handle_file, destination)
return
for (root, _, files) in os.walk(data):
for filename_source in files:
if (os.path.splitext(filename_source)[1][1:] not in walk_allow):
continue
source = os.path.join(root, filename_source)
destination = os.path.join(folder, filename or filename_source)
logging.info(f"Send '{source}' to '{destination}'")
with open(source, "rb") as handle_file:
yield (handle_file, destination)
case "raw":
destination = os.path.join(folder, filename)
logging.info(f"Send raw data to '{destination}'")
yield (data, destination)
case "json":
destination = os.path.join(folder, filename)
logging.info(f"Send raw json to '{destination}'")
with io.BytesIO(json.dumps(data).encode("utf8")) as handle_file:
yield (handle_file, destination)
case "csv":
if (not len(data)):
logging.info(f"No data to write to csv")
return
destination = os.path.join(folder, filename).replace("\\", "/")
logging.info(f"Send raw csv to '{destination}'")
# See: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_csv.html
# See: https://stackoverflow.com/questions/13120127/how-can-i-use-io-stringio-with-the-csv-module/45608450#45608450
with io.StringIO(newline="") as handle_csv:
frame = data if data_isPandas else pandas.DataFrame(data)
frame.to_csv(handle_csv, header=True, index=False, date_format=r"%Y-%m-%dT%H:%M:%S.%fZ")
yield (io.BytesIO(handle_csv.getvalue().encode('utf8')), destination)
case _:
raise KeyError(f"Unknown *input_type* '{input_type}'")
def blobStorage_insert(data, container="postgres", folder=None, filename=None, *, method="upsert", **kwargs):
""" Sends data to an Azure blob storage.
data (any) - What to send to the blob storage
container (str) - Which blob storage container to store the blob(s) in
folder (str) - What folder path of the container to store the blob(s) in
- If None: Will put the file in the root directory
filename (str) - What file name to use for the blob
- If None: Will try coming up with a file name
method (str) - How to handle sending the data
- upsert: Update existing blob in the folder, otherwise create a new blob
- insert: Try adding it and throw an error if it already exists
- drop: Drop all blobs in the folder and insert new blobs
Example Input: blobStorage_insert([{Lorem: "ipsum"}])
Example Input: blobStorage_insert([{Lorem: "ipsum"}], container="treehouse", folder="rps")
Example Input: blobStorage_insert({Lorem: "ipsum"}, input_type="json")
Example Input: blobStorage_insert("C:/lorem/ipsum", input_type="file")
Example Input: blobStorage_insert("C:/lorem/ipsum", input_type="file", walk_allow=("csv", "xlsx"))
Example Input: blobStorage_insert(open("lorem.txt", "r"), filename="lorem.txt", input_type="raw")
"""
filename = filename or "unknown.txt"
blobStorage = _blobStorage_getConnection(container, **kwargs)
contents = blobStorage.ls_files(folder or "")
if (len(contents)):
match method:
case "drop":
logging.info(f"Dropping the following from '{folder}': {contents}")
for filename_source in contents:
blobStorage.rm(f"{folder}/{filename_source}")
case "upsert":
for filename_source in contents:
if (filename_source.endswith(filename)):
logging.info(f"Dropping '{filename_source}' from '{folder}'")
blobStorage.rm(f"{folder}/{filename_source}")
case "insert":
pass
case _:
raise KeyError(f"Unknown *method* '{method}'")
found = False
for (handle_binary, destination) in _yield_fileOutput(data=data, folder=folder, filename=filename, **kwargs):
found = True
blobStorage.client.upload_blob(name=destination, data=handle_binary.read())
if (not found):
raise ValueError("No files were found")
return True
def blobStorage_select(container="postgres", folder=None, filename=None, *,
input_type="csv", output_type="python", as_dict=True,
multifile_method="append", force_list=False, **kwargs):
""" Returns data from blob storage
container (str or tuple) - Which blob storage container to read the blob(s) from
- If tuple: Will look in all container names given
folder (str or tuple) - What folder path of the container to read the blob(s) from
- If None: Will look in the root directory
- If tuple: Will look in all folder names given
filename (str or tuple) - What file name to read from the container
- If None: Will try coming up with a file name
- If tuple: Will look for all files given
output_type (str) - How to return what is in the blob storage
- client: Pre-download file handle
- handle_blob: Post-download file handle
- blob: The raw binary string contents of the blob
- handle_str: Stringified file handle
- str: The raw string contents of the blob
- python: Make it into a python object
input_type (str) - How to interpret the blob when *output_type* is 'python'
- csv: A list of dictionaries
as_dict (bool) - If csv file contents should be returned as a list of dictionaries
multifile_method (str) - How to handle when multiple files are requested (Only applies to the 'python' *output_type*; all other output types will use *multifile_method* as 'separate')
- append: Add the results from all following files to the ones from the first
- separate: Each result is a separate item in a list
force_list (bool) - If empty lists or single item lists should still be returned as lists
Example Input: blobStorage_select(container="ma-extract", folder="treehouse", filename="Resident.csv")
Example Input: blobStorage_select(container="ma-extract", folder="treehouse", filename="Resident.csv", as_dict=False)
Example Input: blobStorage_select(container="ma-extract", folder="treehouse", filename="Resident.csv", output_type="handle_str")
Example Input: blobStorage_select(container="ma-extract", folder=["treehouse", "vineyards"], filename="Resident.csv")
Example Input: blobStorage_select(container="ma-extract", folder=["treehouse", "vineyards"], filename="Resident.csv", multifile_method="separate")
Example Input: blobStorage_select(container="ma-extract", folder="treehouse", filename="Resident.csv", force_list=True)
"""
def yieldFile():
for _container in common.ensure_container(container):
blobStorage = _blobStorage_getConnection(_container, **kwargs)
for _folder in common.ensure_container(folder):
for _filename in common.ensure_container(filename or "unknown.txt"):
client = blobStorage.client.get_blob_client(blob=os.path.join(_folder, _filename))
if (output_type == "client"):
yield client
continue
handle_blob = client.download_blob()
if (output_type == "handle_blob"):
yield handle_blob
continue
data_bin = handle_blob.readall()
if (output_type == "blob"):
yield data_bin
continue
try:
handle_str = io.TextIOWrapper(io.BytesIO(data_bin), encoding="Windows-1252")
except UnicodeDecodeError as error:
import chardet
handle_str = io.TextIOWrapper(io.BytesIO(data_bin), encoding=chardet.detect(data_bin)["encoding"])
if (output_type == "str"):
yield handle_str.read()
continue
yield handle_str
###############################
output = tuple(yieldFile())
output_count = len(output)
if (not output_count):
return () if force_list else None
if (output_type != "python"):
return output if (force_list or (output_count > 1)) else output[0]
match input_type:
case "csv":
answer = []
for item in output:
# See: https://pandas.pydata.org/docs/reference/api/pandas.read_csv.html#pandas-read-csv
# See: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_csv.html
frame = pandas.read_csv(item)
answer.append(frame)
if (multifile_method == "separate"):
return answer
# See: https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html
return pandas.concat(answer, join="outer", ignore_index=True, sort=False)
case _:
raise KeyError(f"Unknown *input_type* '{input_type}'")
def dropbox_insert(data, container="systems_data/report_data_source", folder=None, filename=None, *, method="upsert", token=None, **kwargs):
""" Sends data to dropbox.
data (any) - What to send to the dropbox
container (str) - Which dropbox root folder to store the file(s) in
folder (str) - What folder path of the container to store the file(s) in
- If None: Will put the file in the root directory
filename (str) - What file name to use for the file
- If None: Will try coming up with a file name
method (str) - How to handle sending the data
- upsert: Update existing file in the folder, otherwise create a new file
- insert: Try adding it and throw an error if it already exists
- drop: Drop all files in the folder and insert new files
token (str) - The access token for the dropbox account
Example Input: dropbox_insert([{Lorem: "ipsum"}])
Example Input: dropbox_insert([{Lorem: "ipsum"}], container="treehouse", folder="rps")
Example Input: dropbox_insert({Lorem: "ipsum"}, input_type="json")
Example Input: dropbox_insert("C:/lorem/ipsum", input_type="file")
Example Input: dropbox_insert("C:/lorem/ipsum", input_type="file", walk_allow=("csv", "xlsx"))
Example Input: dropbox_insert(open("lorem.txt", "r"), filename="lorem.txt", input_type="raw")
"""
filename = filename or "unknown.txt"
token = token or config("token", "dropbox")
dropboxHandle = dropbox.Dropbox(token)
# See: https://dropbox-sdk-python.readthedocs.io/en/latest/api/dropbox.html#dropbox.dropbox_client.Dropbox.files_list_folder
# contents = dropboxHandle.files_list_folder(folder or "")
# if (len(contents)):
# match method:
# case "drop":
# raise NotImplementedError("Dropbox drop all folder contents")
# case "upsert":
# pass
# case "insert":
# raise NotImplementedError("Dropbox insert file or rename to be unique")
# case _:
# raise KeyError(f"Unknown *method* '{method}'")
# See: https://dropbox-sdk-python.readthedocs.io/en/latest/api/dropbox.html#dropbox.dropbox_client.Dropbox.files_upload
for (handle_binary, destination) in _yield_fileOutput(data=data, folder=folder, filename=filename, **kwargs):
dropboxHandle.files_upload(handle_binary.read(), os.path.join("/", container, destination).replace("\\","/"))
return True
def webflow_select(collection_id, limit=-1, *, offset=0, token=None, **kwargs):
""" Returns the data from a webflow collection.
See: https://www.briantsdawson.com/blog/webflow-api-how-to-get-site-collection-and-item-ids-for-zapier-and-parabola-use
collection_id (str) - Which collection to connect to
limit (int) - How much data to return. If less than 1 will return everything
offset (int) - Use in combination with limit being not less than 1
Example Input: webflow_select(collection_id="623107ba68bd7ba11ca033c7")
Example Input: webflow_select(collection_id="623107ba68bd7ba11ca033c7", limit=1)
Example Input: webflow_select(collection_id="623107ba68bd7ba11ca033c7", limit=100, offset=100)
"""
token = token or config("token", "webflow")
webflow_api = Webflow(token=token)
return webflow_api.collection(collection_id=collection_id, limit=limit, all=limit <= 0)["items"]
def webflow_insert(data, collection_id="623107ba68bd7ba11ca033c7", *, method="upsert", upsert_on="_id", live=False, token=None, site_id="623107ba68bd7b6644a033c0", **kwargs):
""" Sends data to a webflow collection.
See: https://www.briantsdawson.com/blog/webflow-api-how-to-get-site-collection-and-item-ids-for-zapier-and-parabola-use
method (str) - How to handle sending the data
- upsert: Update existing collection items that match on *upsert_on*, otherwise create new ones
- insert: Try adding it and throw an error if it already exists
- drop: Drop all collection items in the collection and insert new collection items
upsert_on (str) - What key to compare updates against
live (bool) - If the change should be applied to the production server instead of the development server
Example Input: webflow_insert([{Lorem: "ipsum"}])
Example Input: webflow_insert([{Lorem: "ipsum"}], collection_id="623107ba68bd7ba11ca033c7")
"""
if (isinstance(data, pandas.DataFrame)):
# See: https://pandas.pydata.org/pandas-docs/version/0.17.0/generated/pandas.DataFrame.to_dict.html#pandas.DataFrame.to_dict
data = data.replace({numpy.nan: None}).to_dict("records")
if (not len(data)):
logging.info(f"No data to insert into '{table}'")
return False
token = token or config("token", "webflow")
webflow_api = Webflow(token=token)
match method:
case "drop":
for item in webflow_api.items(collection_id=collection_id)["items"]:
webflow_api.removeItem(collection_id=collection_id, item_id=item["_id"])
for item in data:
webflow_api.createItem(collection_id=collection_id, item_data=item, live=live)
case "insert":
for item in data:
webflow_api.createItem(collection_id=collection_id, item_data=item, live=live)
case "upsert":
catalogue = {item.get(upsert_on, None): item for item in webflow_api.items(collection_id=collection_id)["items"]}
for item in data:
item_existing = catalogue.get(item.get(upsert_on, None), None)
if (not item_existing):
if ("_draft" not in item):
item["_draft"] = False
if ("_archived" not in item):
item["_archived"] = False
webflow_api.createItem(collection_id=collection_id, item_data=item, live=live)
continue
# Check if any changes need to be made
for (key, value) in item.items():
if (item_existing.get(key, None) != value):
webflow_api.patchItem(collection_id=collection_id, item_id=item_existing["_id"], item_data=item, live=live)
break
case _:
raise KeyError(f"Unknown *method* '{method}'")
# Deprecated names
renameKeys = _posgres_renameKeys
get_manage_america_token = _ma_getToken
def get_json_response_ma(token, url, **kwargs):
""" Function to return json response from MA.
token (str) - The token to use for this request
url (str) - The url to send the request to
Example Input: get_json_response_ma(token, "https://n6.manageamerica.com/api/property/?companyId=233")
Example Input: get_json_response_ma(token, ["https://www.lorem.com", "https://www.ipsum.com"])
"""
logging.info("DEPRECATED: Getting MA Data...")
response = requests.request("GET", url,
headers = {"Authorization": f"Bearer {token}"},
data = {},
)
data = response.json()
logging.debug(debugging and f"ma_response: '{data}'")
return data
``` |
{
"source": "JoshMayberry/WordManipulator",
"score": 3
} |
#### File: JoshMayberry/WordManipulator/controller.py
```python
__version__ = "2.5.0"
##Does not use win32com
#Import standard elements
import os
import sys
import datetime
import subprocess
import warnings
import docx
#Controllers
def build(*args, **kwargs):
"""Starts the Word document making process."""
return Word(*args, **kwargs)
#Iterators
class Iterator(object):
"""Used by handle objects to iterate over their nested objects."""
def __init__(self, data, filterNone = False):
if (not isinstance(data, (list, dict))):
data = data[:]
self.data = data
if (isinstance(self.data, dict)):
self.order = list(self.data.keys())
if (filterNone):
self.order = [key for key in self.data.keys() if key != None]
else:
self.order = [key if key != None else "" for key in self.data.keys()]
self.order.sort()
self.order = [key if key != "" else None for key in self.order]
def __iter__(self):
return self
def __next__(self):
if (not isinstance(self.data, dict)):
if not self.data:
raise StopIteration
return self.data.pop()
else:
if not self.order:
raise StopIteration
key = self.order.pop()
return self.data[key]
#Global Inheritance Classes
class Utilities():
def __init__(self):
"""Functions to make the Excel module easier.
Example Input: Utilities()
"""
#Internal Variables
self.childCatalogue = {} #{label (str): handle (child)}
def __repr__(self):
representation = f"{type(self).__name__}(id = {id(self)})"
return representation
def __str__(self):
output = f"{type(self).__name__}()\n-- id: {id(self)}\n"
if (hasattr(self, "parent") and (self.parent != None)):
output += f"-- Parent: {self.parent.__repr__()}\n"
return output
def __len__(self):
return len(self[:])
def __contains__(self, key):
return self._get(self.childCatalogue, key, returnExists = True)
def __iter__(self):
return Iterator(self.childCatalogue)
def __getitem__(self, key):
return self._get(self.childCatalogue, key)
def __setitem__(self, key, value):
self.childCatalogue[key] = value
def __delitem__(self, key):
del self.childCatalogue[key]
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.entered = False
if (traceback != None):
print(exc_type, exc_value)
return False
def _get(self, itemCatalogue, itemLabel = None, returnExists = False):
"""Searches the label catalogue for the requested object.
itemLabel (any) - What the object is labeled as in the catalogue
- If slice: objects will be returned from between the given spots
- If None: Will return all that would be in an unbound slice
Example Input: _get(self.childCatalogue)
Example Input: _get(self.childCatalogue, 0)
Example Input: _get(self.childCatalogue, slice(None, None, None))
Example Input: _get(self.childCatalogue, slice(2, 7, None))
"""
#Account for retrieving all nested
if (itemLabel == None):
itemLabel = slice(None, None, None)
#Account for indexing
if (isinstance(itemLabel, slice)):
if (itemLabel.step != None):
raise FutureWarning(f"Add slice steps to _get() for indexing {self.__repr__()}")
elif ((itemLabel.start != None) and (itemLabel.start not in itemCatalogue)):
errorMessage = f"There is no item labled {itemLabel.start} in the row catalogue for {self.__repr__()}"
raise KeyError(errorMessage)
elif ((itemLabel.stop != None) and (itemLabel.stop not in itemCatalogue)):
errorMessage = f"There is no item labled {itemLabel.stop} in the row catalogue for {self.__repr__()}"
raise KeyError(errorMessage)
handleList = []
begin = False
for item in sorted(itemCatalogue.keys()):
#Allow for slicing with non-integers
if ((not begin) and ((itemLabel.start == None) or (itemCatalogue[item].label == itemLabel.start))):
begin = True
elif ((itemLabel.stop != None) and (itemCatalogue[item].label == itemLabel.stop)):
break
#Slice catalogue via creation date
if (begin):
handleList.append(itemCatalogue[item])
return handleList
elif (itemLabel not in itemCatalogue):
answer = None
else:
answer = itemCatalogue[itemLabel]
if (returnExists):
return answer != None
if (answer != None):
if (isinstance(answer, (list, tuple, range))):
if (len(answer) == 1):
answer = answer[0]
return answer
errorMessage = f"There is no item labled {itemLabel} in the data catalogue for {self.__repr__()}"
raise KeyError(errorMessage)
def getUnique(self, base = "{}", increment = 1, start = 1, exclude = []):
"""Returns a unique name with the given criteria.
Example Input: getUnique()
Example Input: getUnique("Format_{}")
Example Input: getUnique(exclude = [item.database_id for item in self.parent])
"""
if (not isinstance(exclude, (list, tuple, range))):
exclude = [exclude]
while True:
ending = start + increment - 1
if ((base.format(ending) in self) or (base.format(ending) in exclude) or (ending in exclude) or (str(ending) in [str(item) for item in exclude])):
increment += 1
else:
break
return base.format(ending)
class Utilities_Widget(Utilities):
pass
# def __exit__(self, *args, **kwargs):
# answer = super().__exit__(*args, **kwargs)
# self.apply()
# return answer
#Handles
class Word(Utilities):
def __init__(self):
"""Works with word files.
Documentation for docx can be found at: http://python-docx.readthedocs.io/en/latest/
Example Input: Word()
"""
super().__init__()
def new(self, label, *args, **kwargs):
"""Creates a new document ans saves it in memmory.
label (str) - The label of the workbook
firstSheet (str) - The label for the first sheet in the workbook
- If None: The workbook will start off without any sheets
Example Input: new("test")
"""
document = self.Document(self, label, *args, **kwargs)
self[label] = document
return document
def save(self, label, *args, **kwargs):
"""Saves the document to a specified location.
Example Input: save("test")
"""
self[label].save(*args, **kwargs)
def load(self, label, *args, **kwargs):
"""Loads a document from a specified location into memmory.
Example Input: load("test")
"""
self[label].load(*args, **kwargs)
def run(self, label, *args, **kwargs):
"""Opens the ms word file for the user.
Example Input: run("converted")
"""
self[label].run(*args, **kwargs)
class Document(Utilities):
def __init__(self, parent, label):
"""A handle for the workbook.
firstSheet (str) - The label for the first sheet in the workbook
- If None: The workbook will start off without any sheets
Example Input: Document(self, label)
"""
super().__init__()
self.parent = parent
if (label == None):
label = self.getUnique("Document_{}")
self.label = label
self.title = None
self.imageCatalogue = {} #(dict) - Used to catalogue all of the images in the document. {sheet title: [top-left corner cell (row, column), image as a PIL image]}
self.load()
self.setTitle()
# if (firstSheet != None):
# sheet = self.Sheet(self, firstSheet)
# self[label] = sheet
# self.select()
def setTitle(self, title = None):
"""Changes the title of the workbook.
title (str) - The title of the workbook
- If None: Will use the label for the workbook
Example Input: setTitle("test")
"""
self.thing.core_properties.title = title or self.label
def getTitle(self):
"""Returns the title of the workbook.
Example Input: getTitle()
"""
return self.thing.core_properties.title
def setSubject(self, text = None):
"""The topic of the content of the resource"""
self.thing.core_properties.subject = text or ""
def getVersion(self):
return self.thing.core_properties.version
def setVersion(self, text = None):
"""The topic of the content of the resource"""
self.thing.core_properties.version = text or ""
def getSubject(self):
return self.thing.core_properties.subject
def setAuthor(self, text = None):
"""An entity primarily responsible for making the content of the resource"""
self.thing.core_properties.author = text or ""
def getAuthor(self):
return self.thing.core_properties.author
def setCategory(self, text = None):
"""A categorization of the content of this package.
Example values might include: Resume, Letter, Financial Forecast, Proposal, or Technical Presentation.
"""
self.thing.core_properties.category = text or ""
def getCategory(self):
return self.thing.core_properties.category
def setComments(self, text = None):
"""An account of the content of the resource"""
self.thing.core_properties.comments = text or ""
def getComments(self):
return self.thing.core_properties.comments
def setContentStatus(self, text = None):
"""Completion status of the document, e.g. 'draft'"""
self.thing.core_properties.content_status = text or ""
def getContentStatus(self):
return self.thing.core_properties.content_status
def setIdentifier(self, text = None):
"""An unambiguous reference to the resource within a given context, e.g. ISBN"""
self.thing.core_properties.identifier = text or ""
def getIdentifier(self):
return self.thing.core_properties.identifier
def setKeywords(self, text = None):
"""Descriptive words or short phrases likely to be used as search terms for this document"""
self.thing.core_properties.keywords = text or ""
def getKeywords(self):
return self.thing.core_properties.keywords
def setLanguage(self, text = None):
"""Language the document is written in"""
self.thing.core_properties.language = text or ""
def getLanguage(self):
return self.thing.core_properties.language
def setRevision(self, value = None):
"""Number of this revision, incremented by Word each time the document is saved.
Note however python-docx does not automatically increment the revision number when it saves a document.
"""
self.thing.core_properties.revision = value or self.getRevision() + 1
def getRevision(self):
return self.thing.core_properties.revision
def setTime_created(self, time = None):
"""Time of intial creation of the document"""
self.thing.core_properties.created = time or dateTime.now()
def getTime_created(self):
return self.thing.core_properties.created
def setTime_printed(self, time = None):
"""Time the document was last printed"""
self.thing.core_properties.last_printed = time or datetime.datetime.now()
def getTime_printed(self):
return self.thing.core_properties.last_printed
def setTime_modified(self, time = None):
"""Time the document was last modified"""
self.thing.core_properties.modified = time or datetime.datetime.now()
def getTime_modified(self):
return self.thing.core_properties.modified
def setLastModifiedBy(self, text = None):
"""Name or other identifier (such as email address) of person who last modified the document"""
self.thing.core_properties.last_modified_by = text or ""
def getLastModifiedBy(self):
return self.thing.core_properties.last_modified_by
def save(self, filePath = "", overlayOk = True, temporary = False, saveImages = True):
"""Saves the workbook to a specified location.
filePath (str) - Where the file is located
overlayOk (bool) - If True: Images can overlap. If False: Any images under other ones will be deleted. If None: Images will be scooted to the right until they are not under another
temporary (bool) - If True: The file will be saved under the same name, but with "_temp" after it. For debugging things
saveImages (bool) - If True: Images in the document will be preserved upon loading
Images, charts, etc. are not read by openpyxl.
In order to preserve images, charts, etc., each image is loaded and re-written into the loaded workbook
Method for preservation from http://www.penwatch.net/cms/?p=582
Help from: code.activestate.com/recipes/528870-class-for-writing-content-to-excel-and-formatting
Example Input: save()
"""
fileName = self.label
if (temporary):
fileName += "_temp"
try:
#Ensure correct format
if ("." not in fileName):
fileName += ".docx"
self.thing.save(os.path.join(filePath, fileName))
except IOError:
#A document by that name is already open
print("ERROR: The word file is still open. The file has still been saved. Just close the current file without saving.")
def load(self, filePath = None, readImages = False):
"""Loads a workbook from a specified location into memmory.
filePath (str) - Where the file is located
- If None: Will create a new, blank document
readImages (bool) - If True: Images in the document will be preserved upon loading
Images, charts, etc. are not read by openpyxl.
In order to preserve images, charts, etc., each image is loaded and re-written into the loaded workbook
Method for preservation from http://www.penwatch.net/cms/?p=582
Help from: code.activestate.com/recipes/528870-class-for-writing-content-to-excel-and-formatting
Example Input: load()
"""
fileName = self.label
#Ensure correct format
if ("." not in fileName):
fileName += ".xlsx"
#Load the workbook into memory
self.thing = docx.Document(docx = filePath)
self.update()
def run(self, filePath = "./"):
"""Opens the word file for the user.
filePath (str) - Where the file is located
Example Input: run()
"""
fileName = self.label
#Ensure correct format
if ("." not in fileName):
fileName += ".docx"
try:
os.startfile(os.path.join(filePath, fileName))
except AttributeError:
subprocess.call(['open', fileName])
def update(self):
self.updateSections()
def updateSections(self):
for i, sectionObject in enumerate(self.thing.sections):
if (i not in self):
self.addSection(thing = sectionObject)
elif (sectionObject is not self[i].thing):
self[i].thing = sectionObject
def getSection(self, index = None):
if (index != None):
if (index > 0):
return self[index]
else:
sectionList = self[:]
return sectionList[index]
return self[:]
def addParagraph(self, *args, **kwargs):
"""Adds a paragraph to the document."""
return self.Paragraph(self, *args, **kwargs)
def addHeader(self, level = 0, *args, **kwargs):
"""Alias for a header paragraph"""
return self.addParagraph(header = level)
def addIntense(self, *args, **kwargs):
"""Alias for an intense quote paragraph"""
return self.addParagraph(intense = True)
def addList(self, bullet = True, *args, **kwargs):
"""Alias for a list paragraph"""
return self.addParagraph(bulletList = bullet)
def addImage(self, *args, **kwargs):
"""Adds an image to the document."""
return self.Image(self, *args, **kwargs)
def addSection(self, *args, **kwargs):
"""Adds a section to the document."""
return self.Section(self, *args, **kwargs)
def addTable(self, *args, **kwargs):
"""Adds a table to the document."""
return self.Table(self, *args, **kwargs)
def addPageBreak(self):
"""Adds a page break to the document."""
self.thing.add_page_break()
class Paragraph(Utilities_Widget):
def __init__(self, parent, header = None, intense = None, bulletList = None, thing = None):
#Initialize Inherited Modules
super().__init__()
#Internal Variables
self.parent = parent
if (thing != None):
self.thing = thing
else:
if (header != None):
self.thing = self.parent.thing.add_heading(level = header)
elif (intense != None):
self.thing = self.parent.thing.add_paragraph(style = "Intense Quote")
elif (bulletList != None):
if (bulletList):
self.thing = self.parent.thing.add_paragraph(style = "List Bullet")
else:
self.thing = self.parent.thing.add_paragraph(style = "List Number")
else:
self.thing = self.parent.thing.add_paragraph()
def addText(self, text = "", bold = None, italic = None, underline = None):
"""Adds text to the paragraph.
Example Input: addText("Lorem Ipsum")
Example Input: addText("Lorem Ipsum", bold = True, italic = True)
Example Input: addText("Lorem Ipsum", bold = True, italic = True)
"""
segment = self.thing.add_run(text)
if (bold != None):
segment.bold = bold
if (italic != None):
segment.italic = italic
if (underline != None):
segment.underline = underline
class Image(Utilities_Widget):
def __init__(self, parent, filePath, width = None, height = None, thing = None):
#Initialize Inherited Modules
super().__init__()
#Internal Variables
self.parent = parent
if (width != None):
width = docx.shared.Inches(width)
if (height != None):
height = docx.shared.Inches(height)
if (thing != None):
self.thing = thing
else:
self.thing = self.parent.thing.add_picture(filePath, width = width, height = height)
class Section(Utilities_Widget):
def __init__(self, parent, thing = None):
#Initialize Inherited Modules
super().__init__()
#Internal Variables
self.parent = parent
if (thing != None):
self.thing = thing
else:
self.thing = self.parent.thing.add_section()
#Nest section in document
self.parent[len(self.parent)] = self
def startOdd(self):
"""Section begins on next odd page"""
self.thing.start_type = docx.enum.section.WD_SECTION.ODD_PAGE
def startEven(self):
"""Section begins on next even page"""
self.thing.start_type = docx.enum.section.WD_SECTION.EVEN_PAGE
def startNew(self):
"""Section begins on next new page"""
self.thing.start_type = docx.enum.section.WD_SECTION.NEW_PAGE
def startNewColumn(self):
"""Section begins on next new column"""
self.thing.start_type = docx.enum.section.WD_SECTION.NEW_COLUMN
def startNone(self):
"""Section begins after the last section"""
self.thing.start_type = docx.enum.section.WD_SECTION.CONTINUOUS
def setSize(self, width = None, height = None):
self.setWidth(width)
self.setHeight(height)
def setWidth(self, value = None):
"""Total page width used for this section, inclusive of all edge spacing values such as margins.
Page orientation is taken into account, so for example, its expected value would be Inches(11) for letter-sized paper when orientation is landscape.
"""
if (value != None):
self.thing.page_width = docx.shared.Inches(value)
def setHeight(self, value = None):
"""Total page height used for this section, inclusive of all edge spacing values such as margins.
Page orientation is taken into account, so for example, its expected value would be Inches(8.5) for letter-sized paper when orientation is landscape.
"""
if (value != None):
self.thing.page_height = docx.shared.Inches(value)
def setHeight_header(self, value = None):
"""Length object representing the distance from the top edge of the page to the top edge of the header.
None if no setting is present in the XML.
"""
if (value != None):
self.thing.header_distance = docx.shared.Inches(value)
def setHeight_footer(self, value = None):
"""Length object representing the distance from the bottom edge of the page to the bottom edge of the footer.
None if no setting is present in the XML.
"""
if (value != None):
self.thing.footer_distance = docx.shared.Inches(value)
def vertical(self, state = True):
if (state):
self.thing.orientation = docx.enum.section.WD_ORIENT.PORTRAIT
else:
self.horizontal()
def horizontal(self, state = True):
if (state):
self.thing.orientation = docx.enum.section.WD_ORIENT.LANDSCAPE
else:
self.vertical()
def setMargins(self, left = None, right = None, top = None, bottom = None):
self.setMargins_left(left)
self.setMargins_right(right)
self.setMargins_top(top)
self.setMargins_bottom(bottom)
def setMargins_left(self, value = None):
"""Length object representing the left margin for all pages in this section in English Metric Units."""
if (value != None):
self.thing.left_margin = docx.shared.Inches(value)
def setMargins_right(self, value = None):
"""Length object representing the right margin for all pages in this section in English Metric Units."""
if (value != None):
self.thing.right_margin = docx.shared.Inches(value)
def setMargins_top(self, value = None):
"""Length object representing the top margin for all pages in this section in English Metric Units."""
if (value != None):
self.thing.top_margin = docx.shared.Inches(value)
def setMargins_bottom(self, value = None):
"""Length object representing the bottom margin for all pages in this section in English Metric Units."""
if (value != None):
self.thing.bottom_margin = docx.shared.Inches(value)
def setGutter(self, value = None):
"""Length object representing the page gutter size in English Metric Units for all pages in this section.
The page gutter is extra spacing added to the inner margin to ensure even margins after page binding.
"""
if (value != None):
self.thing.gutter = docx.shared.Inches(value)
class Table(Utilities_Widget):
def __init__(self, parent, rows = 1, columns = 1, thing = None):
#Initialize Inherited Modules
super().__init__()
#Internal Variables
self.parent = parent
if (thing != None):
self.thing = thing
else:
self.thing = self.parent.thing.add_table(rows, columns)
def getCells(self):
pass
if (__name__ == "__main__"):
word = Word()
with word.new("test_2") as myDocument:
with myDocument.getSection(-1) as mySection:
mySection.setMargins(0.5, 0.5, 0.5, 0.5)
with myDocument.addHeader() as myHeader:
myHeader.addText("Document Title")
with myDocument.addParagraph() as myParagraph:
myParagraph.addText("A plain paragraph having some ")
myParagraph.addText("bold text", bold = True)
myParagraph.addText(" and some ")
myParagraph.addText("italic text.", italic = True)
with myDocument.addHeader(level = 1) as myHeader:
myHeader.addText("Sub Heading")
with myDocument.addIntense() as myParagraph:
myParagraph.addText("Intense Quote")
# with myDocument.addList(bullet = True) as myList:
# myList.addItem("First item in unordered list")
# myList.addItem("Second item in unordered list")
# with myDocument.addList(bullet = False) as myList:
# myList.addItem("First item in ordered list")
# myList.addItem("Second item in ordered list")
# myDocument.addImage("C:/Users/Kade/Pictures/Untitled.png", width = 1.25)
myDocument.addPageBreak()
# with myDocument.addTable(rows = 1, columns = 3) as myTable:
# for i, cell in enumerate(myTable.getCells()):
# cell.setText(f"Lorem {i}")
myDocument.save()
``` |
{
"source": "joshmcarthur/puppycam",
"score": 2
} |
#### File: joshmcarthur/puppycam/save_snapshot.py
```python
from urllib.request import urlopen
import boto3
import os
def event_handler(event = None, context = None):
s3_client = boto3.client("s3")
handle = urlopen(os.getenv("CAMERA_URL"))
s3_client.upload_fileobj(handle, os.getenv("BUCKET_NAME"),
os.getenv("DESTINATION_FILENAME"),
ExtraArgs={'ACL': 'public-read'})
``` |
{
"source": "JoshMcDonagh/Recursive-Bucket-Sort",
"score": 4
} |
#### File: benchmarks/python/quicksort.py
```python
def partition(arr, low, high):
i = (low-1) # index of smaller element
pivot = arr[high] # pivot
for j in range(low, high):
# If current element is smaller than or
# equal to pivot
if arr[j] <= pivot:
# increment index of smaller element
i = i+1
arr[i], arr[j] = arr[j], arr[i]
arr[i+1], arr[high] = arr[high], arr[i+1]
return (i+1)
# The main function that implements QuickSort
# arr[] --> Array to be sorted,
# low --> Starting index,
# high --> Ending index
# Function to do Quick sort
def quickSort(arr, low, high):
if len(arr) == 1:
return arr
if low < high:
# pi is partitioning index, arr[p] is now
# at right place
pi = partition(arr, low, high)
# Separately sort elements before
# partition and after partition
quickSort(arr, low, pi-1)
quickSort(arr, pi+1, high)
# This code is contributed by <NAME>
#This code is improved by https://github.com/anushkrishnav
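# Illustrative driver (hypothetical values, not part of the original benchmark):
# data = [10, 7, 8, 9, 1, 5]
# quickSort(data, 0, len(data) - 1)
# print(data)  # expected: [1, 5, 7, 8, 9, 10]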
```
#### File: benchmarks/python/rng.py
```python
import random
def generate_random_array(length, lower, upper):
array = []
for i in range(length):
array.append(random.randint(lower, upper))
return array
def generate_partially_random_array(length, lower, upper):
array = generate_random_array(length, lower, upper)
array.sort()
num_of_random_vals = int(length / 7)
for i in range(num_of_random_vals):
index = random.randint(0, length-1)
array[index] = random.randint(lower, upper)
return array
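# Illustrative call (hypothetical values): generate_partially_random_array(70, 0, 100) returns a
# mostly-sorted list of 70 ints in [0, 100] with roughly length/7 (here ~10) values re-randomized.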
``` |
{
"source": "joshmcgrath08/basicshare",
"score": 2
} |
#### File: basicshare/service/app_impl.py
```python
import datetime
import logging
import uuid
from constants import MESSAGE_MAX_ACCESSES, MESSAGE_EXPIRATION_SECONDS, READ_RECEIPT_EXPIRATION_SECONDS
from exceptions import HttpError
from message import new_message, message_to_external_json
from notification import send_notification
from read_receipt import new_read_receipt
from recaptcha import verify_recaptcha
import store
logger = logging.getLogger()
def put_message(
sender_name, receiver_type, receiver_value, payload,
description, recaptcha_verification, recaptcha_override):
verify_recaptcha(recaptcha_verification, recaptcha_override)
message = new_message(
sender_name, receiver_type, receiver_value,
payload, description, MESSAGE_EXPIRATION_SECONDS)
logger.info("Storing message {}".format(message.id))
store.put_message(message)
send_notification(message)
return message_to_external_json(message)
def get_message(message_id, nonce):
message_id = uuid.UUID(message_id)
nonce = uuid.UUID(nonce)
message = store.get_message(message_id)
logger.info("Retrieving message {} with nonce {}".format(message_id, nonce))
if message.nonce != nonce:
logger.warn("Invalid nonce for message_id {}. Expected {} but got {}".format(
message_id, message.nonce, nonce))
raise HttpError("Invalid nonce for message", 403, messageId=message_id, nonce=nonce)
if message.valid_until_datetime < datetime.datetime.utcnow():
logger.warn("Message {} has expired due to time".format(message_id))
raise HttpError("Message has expired", 410, messageId=message_id)
if message.access_count > MESSAGE_MAX_ACCESSES:
logger.warn("Message {} has exceeded maximum accesses".format(message_id))
raise HttpError("Message has expired", 410, messageId=message_id)
return message_to_external_json(message)
def mark_as_read(message_id):
message_id = uuid.UUID(message_id)
read_receipt = new_read_receipt(message_id, READ_RECEIPT_EXPIRATION_SECONDS)
logger.info("Storing read receipt {}".format(read_receipt.id))
store.put_read_receipt(read_receipt)
return None
```
#### File: basicshare/service/main.py
```python
from server import app
import wsgi
def lambda_handler(event, context):
response = wsgi.Response()
try:
environ = wsgi.make_environ(event)
response.write(next(app(environ, response.start_response)))
except Exception as e:
print("Unhandle exception in WSGI", e)
raise e
finally:
return response.get_response()
```
#### File: basicshare/service/message.py
```python
import collections
import datetime
import dateutil.parser
import uuid
from ddb_utils import *
Message = collections.namedtuple(
"Message", ["sender_name",
"receiver_type",
"receiver_value",
"payload",
"description",
"id",
"nonce",
"create_datetime",
"valid_until_datetime",
"access_datetime",
"access_count",
"ddb_ttl"])
def new_message(
sender_name, receiver_type, receiver_value,
payload, description, expiration_seconds):
now = datetime.datetime.utcnow()
expiration = now + datetime.timedelta(seconds=expiration_seconds)
delta = datetime.timedelta(seconds=2*expiration_seconds)
ddb_ttl = int((now + delta).strftime("%s"))
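# ddb_ttl is an epoch-seconds timestamp set 2 * expiration_seconds in the future, so DynamoDB's
# TTL feature can delete the item well after it has logically expired (intent inferred from the code above).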
return Message(sender_name,
receiver_type,
receiver_value,
payload,
description,
uuid.uuid4(),
uuid.uuid4(),
now,
expiration,
None,
0,
ddb_ttl)
def message_to_ddb(message):
res = {
"sender_name": ddb_str(message.sender_name),
"receiver_type": ddb_str(message.receiver_type),
"receiver_value": ddb_str(message.receiver_value),
"payload": ddb_str(message.payload),
"description": ddb_str(message.description),
"id": ddb_str(str(message.id)),
"nonce": ddb_str(str(message.nonce)),
"create_datetime": ddb_datetime(message.create_datetime),
"valid_until_datetime": ddb_datetime(message.valid_until_datetime),
"access_datetime": ddb_datetime(message.access_datetime),
"access_count": ddb_num(message.access_count),
"ddb_ttl": ddb_num(message.ddb_ttl)
}
return ddb_filter_nulls(res)
def message_to_external_json(message):
def format_datetime(dt):
if dt is None:
return None
else:
return dt.isoformat()
return {
"senderName": message.sender_name,
"receiver": {
"type": message.receiver_type,
"value": message.receiver_value
},
"payload": message.payload,
"description": message.description,
"id": str(message.id),
"nonce": str(message.nonce)
}
def ddb_to_message(ddb_value):
def get(key, transform=lambda x: x):
r = ddb_value.get(key, None)
if r is not None:
r = list(r.values())[0]
if r is not None:
return transform(r)
return None
def get_str(key):
return get(key)
def get_int(key):
return get(key, int)
def get_datetime(key):
return get(key, dateutil.parser.parse)
def get_uuid(key):
return get(key, lambda x: uuid.UUID(x))
return Message(
get_str("sender_name"),
get_str("receiver_type"),
get_str("receiver_value"),
get_str("payload"),
get_str("description"),
get_uuid("id"),
get_uuid("nonce"),
get_datetime("create_datetime"),
get_datetime("valid_until_datetime"),
get_datetime("access_datetime"),
get_int("access_count"),
get_int("ddb_ttl"))
```
#### File: basicshare/service/store.py
```python
import logging
import boto3
import botocore
from aws_clients import DDB
from ddb_utils import id_to_ddb
from exceptions import HttpError
from message import message_to_ddb, ddb_to_message
from read_receipt import read_receipt_to_ddb
MESSAGE_TABLE = "Messages"
READ_RECEIPTS_TABLE = "ReadReceipts"
logger = logging.getLogger()
def put_message(message):
DDB.put_item(TableName=MESSAGE_TABLE, Item=message_to_ddb(message))
def get_message(message_id):
key_obj = id_to_ddb(message_id)
try:
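# Atomically increments access_count and returns the updated item (ReturnValues="ALL_NEW"),
# so the caller sees the new access count in a single round trip.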
ddb_res = DDB.update_item(TableName=MESSAGE_TABLE,
Key=key_obj,
UpdateExpression="ADD access_count :one",
ExpressionAttributeValues={
":one": {"N": "1"}
},
ReturnValues="ALL_NEW")
return ddb_to_message(ddb_res["Attributes"])
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "ValidationException":
logger.warn("Message {} does not exist".format(message_id))
raise HttpError("Message does not exist", 404, messageId=message_id)
else:
raise e
def put_read_receipt(read_receipt):
DDB.put_item(TableName=READ_RECEIPTS_TABLE, Item=read_receipt_to_ddb(read_receipt))
``` |
{
"source": "joshmcgrath08/GovLens",
"score": 3
} |
#### File: GovLens/scrapers/agency_api_service.py
```python
import requests
from scrape_data import scrape_data
class AgencyApiService:
def __init__(self):
self.base_url = "http://127.0.0.1:8000/api/agencies"
def get_all_agencies(self):
try:
all_agency_list = self._get(self.base_url)
return all_agency_list
except Exception as ex:
print(f"Error while retrieving all the agency information: {str(ex)}")
def _get(self, url):
response = requests.get(url,headers={'Content-type': 'application/json'})
return response.json()
if __name__=="__main__":
svc = AgencyApiService()
agens = svc.get_all_agencies()
scraped = scrape_data(agens)
print ("SCRAPED")
```
#### File: GovLens/scrapers/agency_dataaccessor.py
```python
import requests
from requests.auth import HTTPBasicAuth
import json, datetime
class AgencyDataAccessor:
def __init__(self, token, agency_info):
self.base_url = "http://127.0.0.1:8000/api/agencies/"
self.agency_info = agency_info
if token is None:
self.token = "Token <PASSWORD>"
else:
self.token = token
def update_scrape_info(self, scrape_info):
try:
outreach_and_communication = scrape_info['profile']['outreach_and_communication']
self.agency_info['scrape_counter'] = self.agency_info['scrape_counter'] + 1
contact_info = outreach_and_communication['contact_access']['info']
self.agency_info['address'] = contact_info.get('address', None)
self.agency_info['phone_number'] = contact_info.get('phone_number', None)
# todo: get the twitter and facebook links
social_media_info = outreach_and_communication['social_media_access']['info']
if len(social_media_info) > 0:
self.agency_info['facebook'] = self.get_social_media_links(social_media_info, 'facebook')
self.agency_info['twitter'] = self.get_social_media_links(social_media_info, 'twitter')
else:
print(f"social media informatioon not available for the agency {scrape_info['Website']}")
self.agency_info['last_successful_scrape'] = datetime.datetime.now()
agency_url = f"{self.base_url}{self.agency_info['id']}/"
response = requests.put(agency_url, data=self.agency_info, headers={'accept': 'application/json', 'Authorization': self.token})
return response
except Exception as ex:
print(
f"An error occurred while posting the agency information: {str(ex)}")
def get_social_media_links(self, social_media_links, social_media_type):
return next((social_media_link for social_media_link in social_media_links if social_media_type.lower() in social_media_link.lower()), None)
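# Illustrative call (hypothetical values): get_social_media_links(
#     ["https://facebook.com/agency", "https://twitter.com/agency"], "twitter")
# returns the twitter link, or None when nothing matches.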
```
#### File: GovLens/scrapers/lighthouse.py
```python
import os, time
import asyncio, aiofiles, aiohttp
import requests, json
from django.conf import settings
from scrapers.base_api_client import ApiClient
from agency_api_service import AgencyApiService
GOOGLE_API_KEY = "" #os.environ['GOOGLE_API_KEY']
PAGE_INSIGHTS_ENDPOINT = "https://www.googleapis.com/pagespeedonline/v5/runPagespeed"
MOBILE_FRIENDLY_ENDPOINT = "https://searchconsole.googleapis.com/v1/urlTestingTools/mobileFriendlyTest:run" # from what i have tested, very hard to automate
'''
Lighthouse has 5 categories of information that can be pulled from a url
- performance
- accessibility
- best_practices
- pwa progressive web app: relatively fast, mobile friendly, secure origin, some best practices
- seo search engine optimization '''
class PageInsightsClient(ApiClient):
def __init__(self, api_uri=PAGE_INSIGHTS_ENDPOINT, api_key=GOOGLE_API_KEY):
ApiClient.__init__(self, api_uri, api_key)
def get_page_insights(self, url, category):
data = {
'url': url,
'key': self.api_key,
'category': category
}
return self.get("", data=data)
class GoogleMobileFriendlyClient(ApiClient):
def __init__(self, api_uri=MOBILE_FRIENDLY_ENDPOINT, api_key=GOOGLE_API_KEY):
self.urls = []
self.results = []
ApiClient.__init__(self, api_uri, api_key)
def get_mobile_friendly(self, url, index):
data = {
'url': url
}
params = {
'key': self.api_key
}
return self.post("", index, data=data, params=params)
```
#### File: GovLens/scrapers/load_file.py
```python
import csv, os
import json
from process_agency_info import AgencyInfo
ID_ROW = 1
NAME_ROW = 2
PHONE_ROW = 5
BACKUP_PHONE_ROW = 6
WEBSITE_ROW = 8
TWITTER_ROW = 9
BACKUP_TWITTER_ROW = 10
FACEBOOK_ROW = 17
def fill_agency_objects(filepath=os.path.join(os.path.dirname(__file__),
"./data/agencies.csv")):
with open(filepath) as file:
reader = csv.reader(file)
next(reader, None) # skip the headers
i = 0
all_agency_info=[]
for row in reader:
# only process the first 40 rows to test behavior
if i >= 40:
break
agency = {
'id': row[ID_ROW],
'name': row[NAME_ROW],
'website': row[WEBSITE_ROW]
}
agency_instance = AgencyInfo(agency)
agency_details = agency_instance.process_agency_info()
all_agency_info.append(agency_details)
i+=1
print(agency)
all_info_json = json.dumps(all_agency_info)
print(all_info_json)
fill_agency_objects()
```
#### File: scrapers/scrapers/security_scraper.py
```python
import requests, os, json
from .base_scraper import BaseScraper
from agency_dataaccessor import AgencyDataAccessor
from lighthouse import PageInsightsClient
class SecurityScraper(BaseScraper):
def __init__(self, raw_page_content, url):
self.page = raw_page_content
self.url = url
self.apiClient = PageInsightsClient()
def get_security_privacy_info(self):
return {
"https": self.get_http_acess(),
"hsts": self.get_hsts(),
"privacy_policies": self.get_privacy_policies()
}
def get_http_access(self):
try:
lighthouse_results = self.apiClient.get_page_insights(self.url, 'pwa').content['lighthouseResult']
score = lighthouse_results['audits']['is-on-https']['score']
is_criteria_met = True if score == 1 else False
return self.get_criteria_object(score, is_criteria_met)
except:
print("Error in get_http_acess for", self.url)
def get_hsts(self):
try:
lighthouse_results = self.apiClient.get_page_insights(self.url, 'pwa').content['lighthouseResult']
score = lighthouse_results['audits']['redirects-http']['score']
is_criteria_met = True if score == 1 else False
return self.get_criteria_object(score, is_criteria_met)
except:
print("Error in get_hsts for", self.url)
def get_privacy_policies(self):
is_criteria_met = True if "privacy policy" in self.page.text.lower() else False
return self.get_criteria_object(None, is_criteria_met)
``` |
{
"source": "JoshMcguigan/ignition-web-scripts",
"score": 2
} |
#### File: ignition-web-scripts/src/tagBrowse.py
```python
def doPost(request,session):
browseTags = system.tag.browseTags(system.util.jsonDecode(request['postData'])[u'rootPath'])
tags = [];
containers = [];
for tag in browseTags:
if (tag.isFolder() or tag.isUDT()):
containers.append(tag.name)
else:
tags.append(tag.name)
tagData = {"tags":tags, "containers":containers}
return {'json': system.util.jsonEncode(tagData)}
``` |
{
"source": "joshmcintyre/dedup",
"score": 4
} |
#### File: dedup/src/in.py
```python
import os
import hashlib
import argparse
# Build the directory hash listing
def find_hashes(directory):
# Get a file listing
os.chdir(directory)
names = [ name for name in os.listdir(directory) if os.path.isfile(name) ]
# Create a dict of file hashes, names
hashes = {}
for name in names:
with open(name, "rb") as f:
content = f.read()
digest = hashlib.sha256(content).hexdigest()
hashes[digest] = name
return hashes
# Calculate the list of files that are in 1 but not in 2
def find_diff(hashes, hashes2):
inter = set(hashes).intersection(set(hashes2))
diffset = set(hashes) - inter
diff = { k : hashes[k] for k in diffset }
return diff
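# Illustrative example (hypothetical values): if hashes = {"a1": "x.txt", "b2": "y.txt"} and
# hashes2 = {"b2": "z.txt"}, find_diff returns {"a1": "x.txt"}, i.e. the content present only in directory 1.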
# Output the files found in directory 1 but not in directory 2
def output_diff(diff):
for digest, name in diff.items():
print("Found file from 1 not in 2 - " + digest[:8] + ": " + name)
# The main entry point for the program
def main():
# Fetch the directory from the command line args
parser = argparse.ArgumentParser(description="Find duplicate files by content")
parser.add_argument("directory", type=str, help="The first directory to compare")
parser.add_argument("directory2", type=str, help="The second directory to compare")
args = parser.parse_args()
# Generate the listing
hashes = find_hashes(args.directory)
hashes2 = find_hashes(args.directory2)
# Calculate the diff
diff = find_diff(hashes, hashes2)
# Output the duplicates list
output_diff(diff)
if __name__ == "__main__":
main()
``` |
{
"source": "josh-mckenzie/cassandra-dtest",
"score": 3
} |
#### File: josh-mckenzie/cassandra-dtest/bootstrap_test.py
```python
import random, time
from dtest import Tester, debug
from tools import *
from assertions import *
from ccmlib.cluster import Cluster
class TestBootstrap(Tester):
def simple_bootstrap_test(self):
cluster = self.cluster
tokens = cluster.balanced_tokens(2)
keys = 10000
# Create a single node cluster
cluster.populate(1, tokens=[tokens[0]]).start()
node1 = cluster.nodes["node1"]
time.sleep(.5)
cursor = self.cql_connection(node1).cursor()
self.create_ks(cursor, 'ks', 1)
self.create_cf(cursor, 'cf', columns={ 'c1' : 'text', 'c2' : 'text' })
for n in xrange(0, keys):
insert_c1c2(cursor, n, "ONE")
node1.flush()
initial_size = node1.data_size()
# Reads inserted data all during the bootstrap process. We shouldn't
# get any error
reader = self.go(lambda _: query_c1c2(cursor, random.randint(0, keys-1), "ONE"))
# Bootstrapping a new node
node2 = new_node(cluster, token=tokens[1])
node2.start()
time.sleep(.5)
reader.check()
node1.cleanup()
time.sleep(.5)
reader.check()
size1 = node1.data_size()
size2 = node2.data_size()
assert_almost_equal(size1, size2)
assert_almost_equal(initial_size, 2 * size1)
def read_from_bootstrapped_node_test(self):
"""Test bootstrapped node sees existing data, eg. CASSANDRA-6648"""
cluster = self.cluster
cluster.populate(3)
version = cluster.version()
cluster.start()
node1 = cluster.nodes['node1']
if version < "2.1":
node1.stress(['-n', '10000'])
else:
node1.stress(['write', 'n=10000', '-rate', 'threads=8'])
node4 = new_node(cluster)
node4.start()
cursor = self.patient_cql_connection(node4).cursor()
cursor.execute('select * from "Keyspace1"."Standard1" limit 10')
assert len(list(cursor)) == 10
```
#### File: josh-mckenzie/cassandra-dtest/concurrent_schema_changes_test.py
```python
import time
import os
import pprint
from threading import Thread
from dtest import Tester, debug
from ccmlib.node import Node
import cql
def wait(delay=2):
"""
An abstraction so that the sleep delays can easily be modified.
"""
time.sleep(delay)
class TestConcurrentSchemaChanges(Tester):
def __init__(self, *argv, **kwargs):
super(TestConcurrentSchemaChanges, self).__init__(*argv, **kwargs)
self.allow_log_errors = True
def prepare_for_changes(self, cursor, namespace='ns1'):
"""
prepares for schema changes by creating a keyspace and column family.
"""
debug("prepare_for_changes() " + str(namespace))
# create a keyspace that will be used
self.create_ks(cursor, "ks_%s" % namespace, 2)
cursor.execute('USE ks_%s' % namespace)
# create a column family with an index and a row of data
query = """
CREATE TABLE cf_%s (
col1 text PRIMARY KEY,
col2 text,
col3 text
);
""" % namespace
cursor.execute(query)
wait(1)
cursor.execute("INSERT INTO cf_%s (col1, col2, col3) VALUES ('a', 'b', 'c');"
% namespace)
# create an index
cursor.execute("CREATE INDEX index_%s ON cf_%s(col2)"%(namespace, namespace))
# create a column family that can be deleted later.
query = """
CREATE TABLE cf2_%s (
col1 uuid PRIMARY KEY,
col2 text,
col3 text
);
""" % namespace
cursor.execute(query)
# make a keyspace that can be deleted
self.create_ks(cursor, "ks2_%s" % namespace, 2)
def make_schema_changes(self, cursor, namespace='ns1'):
"""
makes a heap of changes.
create keyspace
drop keyspace
create column family
drop column family
update column family
drop index
create index (modify column family and add a key)
rebuild index (via jmx)
set default_validation_class
"""
debug("make_schema_changes() " + str(namespace))
cursor.execute('USE ks_%s' % namespace)
# drop keyspace
cursor.execute('DROP KEYSPACE ks2_%s' % namespace)
wait(2)
# create keyspace
self.create_ks(cursor, "ks3_%s" % namespace, 2)
cursor.execute('USE ks_%s' % namespace)
wait(2)
# drop column family
cursor.execute("DROP COLUMNFAMILY cf2_%s" % namespace)
# create column family
query = """
CREATE TABLE cf3_%s (
col1 uuid PRIMARY KEY,
col2 text,
col3 text,
col4 text
);
""" % (namespace)
cursor.execute(query)
# alter column family
query = """
ALTER COLUMNFAMILY cf_%s
ADD col4 text;
""" % namespace
cursor.execute(query)
# add index
cursor.execute("CREATE INDEX index2_%s ON cf_%s(col3)"%(namespace, namespace))
# remove an index
cursor.execute("DROP INDEX index_%s" % namespace)
def validate_schema_consistent(self, node):
""" Makes sure that there is only one schema """
debug("validate_schema_consistent() " + node.name)
host, port = node.network_interfaces['thrift']
conn = cql.connect(host, port, keyspace=None)
schemas = conn.client.describe_schema_versions()
num_schemas = len([ss for ss in schemas.keys() if ss != 'UNREACHABLE'])
assert num_schemas == 1, "There were multiple schema versions: " + pprint.pformat(schemas)
def basic_test(self):
"""
make several schema changes on the same node.
"""
debug("basic_test()")
cluster = self.cluster
cluster.populate(2).start()
node1 = cluster.nodelist()[0]
wait(2)
cursor = self.cql_connection(node1).cursor()
self.prepare_for_changes(cursor, namespace='ns1')
self.make_schema_changes(cursor, namespace='ns1')
def changes_to_different_nodes_test(self):
debug("changes_to_different_nodes_test()")
cluster = self.cluster
cluster.populate(2).start()
[node1, node2] = cluster.nodelist()
wait(2)
cursor = self.cql_connection(node1).cursor()
self.prepare_for_changes(cursor, namespace='ns1')
self.make_schema_changes(cursor, namespace='ns1')
wait(3)
self.validate_schema_consistent(node1)
# wait for changes to get to the first node
wait(20)
cursor = self.cql_connection(node2).cursor()
self.prepare_for_changes(cursor, namespace='ns2')
self.make_schema_changes(cursor, namespace='ns2')
wait(3)
self.validate_schema_consistent(node1)
# check both, just because we can
self.validate_schema_consistent(node2)
def changes_while_node_down_test(self):
"""
makes schema changes while a node is down.
Make schema changes to node 1 while node 2 is down.
Then bring up 2 and make sure it gets the changes.
"""
debug("changes_while_node_down_test()")
cluster = self.cluster
cluster.populate(2).start()
[node1, node2] = cluster.nodelist()
wait(2)
cursor = self.cql_connection(node2).cursor()
self.prepare_for_changes(cursor, namespace='ns2')
node1.stop()
wait(2)
self.make_schema_changes(cursor, namespace='ns2')
wait(2)
node2.stop()
wait(2)
node1.start()
node2.start()
wait(20)
self.validate_schema_consistent(node1)
def changes_while_node_toggle_test(self):
"""
makes schema changes while a node is down.
Bring down 1 and change 2.
Bring down 2, bring up 1, and finally bring up 2.
1 should get the changes.
"""
debug("changes_while_node_toggle_test()")
cluster = self.cluster
cluster.populate(2).start()
[node1, node2] = cluster.nodelist()
wait(2)
cursor = self.cql_connection(node2).cursor()
self.prepare_for_changes(cursor, namespace='ns2')
node1.stop()
wait(2)
self.make_schema_changes(cursor, namespace='ns2')
wait(2)
node2.stop()
wait(2)
node1.start()
node2.start()
wait(20)
self.validate_schema_consistent(node1)
def decommission_node_test(self):
debug("decommission_node_test()")
cluster = self.cluster
cluster.populate(1)
# create and add a new node; it must not be a seed, otherwise
# we get schema disagreement issues for awhile after decommissioning it.
node2 = Node('node2',
cluster,
True,
('127.0.0.2', 9160),
('127.0.0.2', 7000),
'7200',
'0',
None)
cluster.add(node2, False)
[node1, node2] = cluster.nodelist()
node1.start()
node2.start()
wait(2)
cursor = self.cql_connection(node1).cursor()
self.prepare_for_changes(cursor)
node2.decommission()
wait(30)
self.validate_schema_consistent(node1)
self.make_schema_changes(cursor, namespace='ns1')
# create and add a new node
node3 = Node('node3',
cluster,
True,
('127.0.0.3', 9160),
('127.0.0.3', 7000),
'7300',
'0',
None)
cluster.add(node3, True)
node3.start()
wait(30)
self.validate_schema_consistent(node1)
def snapshot_test(self):
debug("snapshot_test()")
cluster = self.cluster
cluster.populate(2).start()
[node1, node2] = cluster.nodelist()
wait(2)
cursor = self.cql_connection(node1).cursor()
self.prepare_for_changes(cursor, namespace='ns2')
wait(2)
cluster.flush()
wait(2)
node1.nodetool('snapshot -t testsnapshot')
node2.nodetool('snapshot -t testsnapshot')
wait(2)
self.make_schema_changes(cursor, namespace='ns2')
wait(2)
cluster.stop()
### restore the snapshots ##
# clear the commitlogs and data
dirs = ( '%s/commitlogs' % node1.get_path(),
'%s/commitlogs' % node2.get_path(),
'%s/data/ks_ns2/cf_ns2' % node1.get_path(),
'%s/data/ks_ns2/cf_ns2' % node2.get_path(),
)
for dirr in dirs:
for f in os.listdir(dirr):
path = os.path.join(dirr, f)
if os.path.isfile(path):
os.unlink(path)
# copy the snapshot. TODO: This could be replaced with the creation of hard links.
os.system('cp -p %s/data/ks_ns2/cf_ns2/snapshots/testsnapshot/* %s/data/ks_ns2/cf_ns2/' % (node1.get_path(), node1.get_path()))
os.system('cp -p %s/data/ks_ns2/cf_ns2/snapshots/testsnapshot/* %s/data/ks_ns2/cf_ns2/' % (node2.get_path(), node2.get_path()))
# restart the cluster
cluster.start()
wait(2)
self.validate_schema_consistent(node1)
def load_test(self):
"""
apply schema changes while the cluster is under load.
"""
debug("load_test()")
cluster = self.cluster
cluster.populate(1).start()
node1 = cluster.nodelist()[0]
version = cluster.version()
wait(2)
cursor = self.cql_connection(node1).cursor()
def stress(args=[]):
debug("Stressing")
node1.stress(args)
debug("Done Stressing")
def compact():
debug("Compacting...")
node1.nodetool('compact')
debug("Done Compacting.")
# put some data into the cluster
if version < "2.1":
stress(['--num-keys=30000'])
else:
stress(['write', 'n=30000', '-rate', 'threads=8'])
# now start stressing and compacting at the same time
tcompact = Thread(target=compact)
tcompact.start()
wait(1)
# now the cluster is under a lot of load. Make some schema changes.
if version >= "1.2":
cursor.execute('USE "Keyspace1"')
wait(1)
cursor.execute('DROP COLUMNFAMILY "Standard1"')
wait(3)
cursor.execute('CREATE COLUMNFAMILY "Standard1" (KEY text PRIMARY KEY)')
else:
cursor.execute('USE Keyspace1')
wait(1)
cursor.execute('DROP COLUMNFAMILY Standard1')
wait(3)
cursor.execute('CREATE COLUMNFAMILY Standard1 (KEY text PRIMARY KEY)')
tcompact.join()
```
#### File: josh-mckenzie/cassandra-dtest/counter_tests.py
```python
from dtest import Tester
from assertions import *
from tools import *
import time
class TestCounters(Tester):
def simple_increment_test(self):
""" Simple incrementation test (Created for #3465, that wasn't a bug) """
cluster = self.cluster
cluster.populate(3).start()
nodes = cluster.nodelist()
cursor = self.patient_cql_connection(nodes[0]).cursor()
self.create_ks(cursor, 'ks', 3)
self.create_cf(cursor, 'cf', validation="CounterColumnType", columns={'c': 'counter'})
cursor.close()
cursors = [ self.patient_cql_connection(node, 'ks').cursor() for node in nodes ]
nb_increment=50
nb_counter=10
for i in xrange(0, nb_increment):
for c in xrange(0, nb_counter):
cursor = cursors[(i + c) % len(nodes)]
if cluster.version() >= '1.2':
cursor.execute("UPDATE cf SET c = c + 1 WHERE key = 'counter%i'" % c, consistency_level='QUORUM')
else:
cursor.execute("UPDATE cf USING CONSISTENCY QUORUM SET c = c + 1 WHERE key = 'counter%i'" % c)
cursor = cursors[i % len(nodes)]
keys = ",".join(["'counter%i'" % c for c in xrange(0, nb_counter)])
if cluster.version() >= '1.2':
cursor.execute("SELECT key, c FROM cf WHERE key IN (%s)" % keys, consistency_level='QUORUM')
else:
cursor.execute("SELECT key, c FROM cf USING CONSISTENCY QUORUM WHERE key IN (%s)" % keys)
res = cursor.fetchall()
assert len(res) == nb_counter
for c in xrange(0, nb_counter):
assert len(res[c]) == 2, "Expecting key and counter for counter%i, got %s" % (c, str(res[c]))
assert res[c][1] == i + 1, "Expecting counter%i = %i, got %i" % (c, i + 1, res[c][1])
def upgrade_test(self):
""" Test for bug of #4436 """
cluster = self.cluster
cluster.populate(2).start()
nodes = cluster.nodelist()
cql_version=None
cursor = self.patient_cql_connection(nodes[0], version=cql_version).cursor()
self.create_ks(cursor, 'ks', 2)
query = """
CREATE TABLE counterTable (
k int PRIMARY KEY,
c counter
)
"""
if cluster.version() >= '1.2':
query = query + "WITH compression = { 'sstable_compression' : 'SnappyCompressor' }"
else:
query = query + "WITH compression_parameters:sstable_compression='SnappyCompressor'"
cursor.execute(query)
keys = range(0, 4)
updates = 50
def make_updates():
cursor = self.patient_cql_connection(nodes[0], keyspace='ks', version=cql_version).cursor()
upd = "UPDATE counterTable SET c = c + 1 WHERE k = %d;"
#upd = "UPDATE counterTable SET c = c + 1 WHERE k = :k%d;"
if cluster.version() >= '1.2':
batch = " ".join(["BEGIN COUNTER BATCH"] + [upd % x for x in keys] + ["APPLY BATCH;"])
else:
batch = " ".join(["BEGIN BATCH USING CONSISTENCY LEVEL QUORUM"] + [upd % x for x in keys] + ["APPLY BATCH;"])
#query = cursor.prepare_query(batch)
kmap = { "k%d" % i : i for i in keys }
for i in range(0, updates):
if cluster.version() >= '1.2':
cursor.execute(batch, consistency_level='QUORUM')
else:
cursor.execute(batch)
#cursor.execute_prepared(query, kmap)
def check(i):
cursor = self.patient_cql_connection(nodes[0], keyspace='ks', version=cql_version).cursor()
if cluster.version() >= '1.2':
cursor.execute("SELECT * FROM counterTable", consistency_level='QUORUM')
else:
cursor.execute("SELECT * FROM counterTable USING CONSISTENCY QUORUM")
assert cursor.rowcount == len(keys), "Expected %d rows, got %d: %s" % (len(keys), cursor.rowcount, str(cursor.fetchall()))
for row in cursor:
assert row[1] == i * updates, "Unexpected value %s" % str(row)
def rolling_restart():
# Rolling restart
for i in range(0, 2):
time.sleep(.2)
nodes[i].nodetool("drain")
nodes[i].stop(wait_other_notice=True)
nodes[i].start(wait_other_notice=True)
time.sleep(.2)
make_updates()
check(1)
rolling_restart()
make_updates()
check(2)
rolling_restart()
make_updates()
check(3)
rolling_restart()
check(3)
```
#### File: josh-mckenzie/cassandra-dtest/global_row_key_cache_test.py
```python
import time
from dtest import Tester, debug
from loadmaker import LoadMaker
class TestGlobalRowKeyCache(Tester):
def __init__(self, *argv, **kwargs):
super(TestGlobalRowKeyCache, self).__init__(*argv, **kwargs)
# When a node goes down under load it prints an error in its log.
# If we don't allow log errors, then the test will fail.
# self.allow_log_errors = True
def functional_test(self):
"""
Test global caches.
Test that save and load work in the situation when you write to
different CFs. Read 2 or 3 times to make sure the page cache doesn't
skew the results.
"""
# create some rows to insert
NUM_INSERTS = 100
NUM_UPDATES = 10
NUM_DELETES = 1
cluster = self.cluster
cluster.populate(3)
node1 = cluster.nodelist()[0]
for kcsim in (0, 10):
for rcsim in (0, 10):
setup_name = "%d_%d" % (kcsim, rcsim)
ks_name = 'ks_' + setup_name
debug("setup " + setup_name)
cluster.set_configuration_options(values={
'key_cache_size_in_mb': kcsim,
'row_cache_size_in_mb': rcsim,
'row_cache_save_period': 5,
'key_cache_save_period': 5,
})
cluster.start()
time.sleep(.5)
cursor = self.cql_connection(node1).cursor()
self.create_ks(cursor, ks_name, 3)
time.sleep(1) # wait for propagation
host, port = node1.network_interfaces['thrift']
# create some load makers
lm_standard = LoadMaker(host, port,
keyspace_name=ks_name, column_family_type='standard')
lm_counter = LoadMaker(host, port,
keyspace_name=ks_name, column_family_type='standard', is_counter=True)
# insert some rows
lm_standard.generate(NUM_INSERTS)
lm_counter.generate(NUM_INSERTS)
# flush everything to get it into sstables
for node in cluster.nodelist():
node.flush()
debug("Validating")
for i in range(3):
# read and modify multiple times to get data into and invalidated out of the cache.
lm_standard.update(NUM_UPDATES).delete(NUM_DELETES).validate()
lm_counter.generate().validate()
# let the data be written to the row/key caches.
debug("Letting caches be written")
time.sleep(10)
debug("Stopping cluster")
cluster.stop()
time.sleep(1)
debug("Starting cluster")
cluster.start()
time.sleep(5) # read the data back from row and key caches
lm_standard.refresh_connection()
lm_counter.refresh_connection()
debug("Validating again...")
for i in range(2):
# read and modify multiple times to get data into and invalidated out of the cache.
lm_standard.validate()
lm_counter.validate()
cluster.stop()
```
#### File: josh-mckenzie/cassandra-dtest/putget_test.py
```python
from dtest import Tester
import tools
from tools import no_vnodes, create_c1c2_table, ThriftConnection
import time
class TestPutGet(Tester):
def putget_test(self):
""" Simple put/get on a single row, hitting multiple sstables """
self._putget()
def putget_snappy_test(self):
""" Simple put/get on a single row, but hitting multiple sstables (with snappy compression) """
self._putget(compression="Snappy")
def putget_deflate_test(self):
""" Simple put/get on a single row, but hitting multiple sstables (with deflate compression) """
self._putget(compression="Deflate")
# Simple queries, but with flushes in between inserts to make sure we hit
# sstables (and more than one) on reads
def _putget(self, compression=None):
cluster = self.cluster
cluster.populate(3).start()
[node1, node2, node3] = cluster.nodelist()
cursor = self.patient_cql_connection(node1).cursor()
self.create_ks(cursor, 'ks', 3)
self.create_cf(cursor, 'cf', compression=compression)
tools.putget(cluster, cursor)
def non_local_read_test(self):
""" This test reads from a coordinator we know has no copy of the data """
cluster = self.cluster
cluster.populate(3).start()
[node1, node2, node3] = cluster.nodelist()
cursor = self.patient_cql_connection(node1).cursor()
self.create_ks(cursor, 'ks', 2)
create_c1c2_table(self, cursor)
# insert and get at CL.QUORUM (since RF=2, node1 won't have all key locally)
for n in xrange(0, 1000):
tools.insert_c1c2(cursor, n, "QUORUM")
tools.query_c1c2(cursor, n, "QUORUM")
def rangeputget_test(self):
""" Simple put/get on ranges of rows, hitting multiple sstables """
cluster = self.cluster
cluster.populate(3).start()
[node1, node2, node3] = cluster.nodelist()
cursor = self.patient_cql_connection(node1).cursor()
self.create_ks(cursor, 'ks', 2)
self.create_cf(cursor, 'cf')
tools.range_putget(cluster, cursor)
def wide_row_test(self):
""" Test wide row slices """
cluster = self.cluster
cluster.populate(3).start()
[node1, node2, node3] = cluster.nodelist()
cursor = self.patient_cql_connection(node1).cursor()
self.create_ks(cursor, 'ks', 1)
self.create_cf(cursor, 'cf')
key = 'wide'
for x in xrange(1, 5001):
tools.insert_columns(self, cursor, key, 100, offset=x-1)
for size in (10, 100, 1000):
for x in xrange(1, (50001 - size) / size):
tools.query_columns(self, cursor, key, size, offset=x*size-1)
@no_vnodes()
def wide_slice_test(self):
"""
Check slicing a wide row.
See https://issues.apache.org/jira/browse/CASSANDRA-4919
From Sylvain about duplicating:
Ok, so now that I think about it, you can't reproduce that with CQL currently.
You'll have to use the thrift get_paged_slice call as it's the only way to
trigger this.
Then, I think you'll be able to reproduce with the following steps:
1) you'd want to use 2 nodes with RF=1 and with ByteOrderedPartitioner (it's
possible to reproduce with a random partitioner but a tad more painful)
2) picks token for the nodes so that you know what goes on which node. For
example you may want that any row key starting with 'a' goes on node1, and
anything starting with a 'b' goes on node 2.
3) inserts data that spans the two nodes. Say insert 20 rows 'a0' ... 'a9' and
'b0' ... 'b9' (so 10 rows on each node) with say 10 columns per row.
4) then do a get_paged_slice for keys 'a5' to 'b4' and for the column filter, a
slice filter that picks the five last columns.
5) the get_paged_slice is supposed to return 95 columns (it should return the 5
last columns of a5 and then all 10 columns for 'a6' to 'b4'), but without
CASSANDRA-4919 it will return 90 columns only (it will only return the 5 last
columns of 'b0').
"""
cluster = self.cluster
cluster.set_configuration_options(values={'partitioner': 'org.apache.cassandra.dht.ByteOrderedPartitioner'})
cluster.populate(2)
[node1, node2] = cluster.nodelist()
node1.set_configuration_options(values={'initial_token': "a".encode('hex') })
node1.set_configuration_options(values={'initial_token': "b".encode('hex') })
cluster.start()
time.sleep(.5)
cursor = self.patient_cql_connection(node1, version="2.0.0").cursor()
self.create_ks(cursor, 'ks', 1)
query = """
CREATE TABLE test (
k text PRIMARY KEY
);
"""
cursor.execute(query)
time.sleep(.5)
for i in xrange(10):
key_num = str(i).zfill(2)
query1 = "INSERT INTO test (k, 'col0', 'col1', 'col2', 'col3', 'col4', 'col5', 'col6', 'col7', 'col8', 'col9') VALUES ('a%s', 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)" % (key_num)
query2 = "INSERT INTO test (k, 'col0', 'col1', 'col2', 'col3', 'col4', 'col5', 'col6', 'col7', 'col8', 'col9') VALUES ('b%s', 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)" % (key_num)
cursor.execute(query1)
cursor.execute(query2)
cursor.close()
tc = ThriftConnection(node1, ks_name='ks', cf_name='test')
tc.use_ks()
# Slice on the keys
rnge = tc.Cassandra.KeyRange(
start_key="a%s" % ('5'.zfill(2)),
end_key="b%s" % ('4'.zfill(2)),
count=9999,
)
rows = tc.client.get_paged_slice(
column_family='test',
range=rnge,
start_column='col5',
consistency_level=tc.Cassandra.ConsistencyLevel.ONE,
)
keys = [fd.key for fd in rows]
columns = []
for row in rows:
cols = [col.column.name for col in row.columns]
columns.extend(cols)
#print row.key
#print cols
assert len(columns) == 95, "Regression in cassandra-4919. Expected 95 columns, got %d." % len(columns)
```
#### File: josh-mckenzie/cassandra-dtest/tools.py
```python
import time
from ccmlib.node import Node
from decorator import decorator
from distutils.version import LooseVersion
import cql
import re
import os
from thrift.transport import TTransport, TSocket
from thrift.protocol import TBinaryProtocol
def retry_till_success(fun, *args, **kwargs):
timeout = kwargs.pop('timeout', 60)
bypassed_exception = kwargs.pop('bypassed_exception', Exception)
deadline = time.time() + timeout
while True:
try:
return fun(*args, **kwargs)
except bypassed_exception:
if time.time() > deadline:
raise
else:
# brief pause before next attempt
time.sleep(0.25)
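# Illustrative sketch (not part of the original tools module): how the dtests
# typically lean on retry_till_success; `client` and `ks_def` here are
# hypothetical stand-ins for a thrift client and a keyspace definition.
#
#   retry_till_success(client.system_add_keyspace, ks_def,
#                      timeout=30, bypassed_exception=Exception)
#
# The call retries system_add_keyspace for up to 30 seconds, re-raising only
# once the deadline has passed.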
def create_c1c2_table(tester, cursor, read_repair=None):
tester.create_cf(cursor, 'cf', columns={ 'c1' : 'text', 'c2' : 'text' }, read_repair=read_repair)
def insert_c1c2(cursor, key, consistency="QUORUM"):
if cursor.cql_major_version >= 3:
cursor.execute('UPDATE cf SET c1=\'value1\', c2=\'value2\' WHERE key=\'k%d\'' % key, consistency_level=consistency)
else:
cursor.execute('UPDATE cf USING CONSISTENCY %s SET c1=\'value1\', c2=\'value2\' WHERE key=\'k%d\'' % (consistency, key))
def insert_columns(tester, cursor, key, columns_count, consistency="QUORUM", offset=0):
if tester.cluster.version() >= "1.2":
upds = [ "UPDATE cf SET v=\'value%d\' WHERE key=\'k%s\' AND c=\'c%06d\'" % (i, key, i) for i in xrange(offset*columns_count, columns_count*(offset+1))]
query = 'BEGIN BATCH %s; APPLY BATCH' % '; '.join(upds)
cursor.execute(query, consistency_level=consistency)
else:
kvs = [ "c%06d=value%d" % (i, i) for i in xrange(offset*columns_count, columns_count*(offset+1))]
query = 'UPDATE cf USING CONSISTENCY %s SET %s WHERE key=k%s' % (consistency, ', '.join(kvs), key)
cursor.execute(query)
def query_c1c2(cursor, key, consistency="QUORUM"):
if cursor.cql_major_version >= 3:
cursor.execute('SELECT c1, c2 FROM cf WHERE key=\'k%d\'' % key, consistency_level=consistency)
else:
cursor.execute('SELECT c1, c2 FROM cf USING CONSISTENCY %s WHERE key=\'k%d\'' % (consistency, key))
assert cursor.rowcount == 1
res = cursor.fetchone()
assert len(res) == 2 and res[0] == 'value1' and res[1] == 'value2', res
def query_columns(tester, cursor, key, columns_count, consistency="QUORUM", offset=0):
if tester.cluster.version() >= "1.2":
cursor.execute('SELECT c, v FROM cf WHERE key=\'k%s\' AND c >= \'c%06d\' AND c <= \'c%06d\'' % (key, offset, columns_count+offset-1), consistency_level=consistency)
res = cursor.fetchall()
assert len(res) == columns_count, "%s != %s (%s-%s)" % (len(res), columns_count, offset, columns_count+offset-1)
for i in xrange(0, columns_count):
assert res[i][1] == 'value%d' % (i+offset)
else:
cursor.execute('SELECT c%06d..c%06d FROM cf USING CONSISTENCY %s WHERE key=k%s' % (offset, columns_count+offset-1, consistency, key))
assert cursor.rowcount == 1
res = cursor.fetchone()
assert len(res) == columns_count, "%s != %s (%s-%s)" % (len(res), columns_count, offset, columns_count+offset-1)
for i in xrange(0, columns_count):
assert res[i] == 'value%d' % (i+offset)
def remove_c1c2(cursor, key, consistency="QUORUM"):
if cursor.cql_major_version >= 3:
cursor.execute('DELETE c1, c2 FROM cf WHERE key=k%d' % key, consistency_level=consistency)
else:
cursor.execute('DELETE c1, c2 FROM cf USING CONSISTENCY %s WHERE key=k%d' % (consistency, key))
# work for cluster started by populate
def new_node(cluster, bootstrap=True, token=None, remote_debug_port='2000'):
i = len(cluster.nodes) + 1
node = Node('node%s' % i,
cluster,
bootstrap,
('127.0.0.%s' % i, 9160),
('127.0.0.%s' % i, 7000),
str(7000 + i * 100),
remote_debug_port,
token)
cluster.add(node, not bootstrap)
return node
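# Illustrative sketch (not in the original file): for a cluster created with
# cluster.populate(3), the helper above adds a fourth node bound to 127.0.0.4;
# starting it afterwards is the usual ccm call, shown here as typical usage.
#
#   node4 = new_node(cluster)
#   node4.start(wait_other_notice=True)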
def _put_with_overwrite(cluster, cursor, nb_keys, cl="QUORUM"):
if cluster.version() >= "1.2":
for k in xrange(0, nb_keys):
kvs = [ "UPDATE cf SET v=\'value%d\' WHERE key=\'k%s\' AND c=\'c%02d\'" % (i, k, i) for i in xrange(0, 100) ]
cursor.execute('BEGIN BATCH %s APPLY BATCH' % '; '.join(kvs), consistency_level=cl)
time.sleep(.01)
cluster.flush()
for k in xrange(0, nb_keys):
kvs = [ "UPDATE cf SET v=\'value%d\' WHERE key=\'k%s\' AND c=\'c%02d\'" % (i*4, k, i*2) for i in xrange(0, 50) ]
cursor.execute('BEGIN BATCH %s APPLY BATCH' % '; '.join(kvs), consistency_level=cl)
time.sleep(.01)
cluster.flush()
for k in xrange(0, nb_keys):
kvs = [ "UPDATE cf SET v=\'value%d\' WHERE key=\'k%s\' AND c=\'c%02d\'" % (i*20, k, i*5) for i in xrange(0, 20) ]
cursor.execute('BEGIN BATCH %s APPLY BATCH' % '; '.join(kvs), consistency_level=cl)
time.sleep(.01)
cluster.flush()
else:
for k in xrange(0, nb_keys):
kvs = [ "c%02d=value%d" % (i, i) for i in xrange(0, 100) ]
cursor.execute('UPDATE cf USING CONSISTENCY %s SET %s WHERE key=k%s' % (cl, ','.join(kvs), k))
time.sleep(.01)
cluster.flush()
for k in xrange(0, nb_keys):
kvs = [ "c%02d=value%d" % (i*2, i*4) for i in xrange(0, 50) ]
cursor.execute('UPDATE cf USING CONSISTENCY %s SET %s WHERE key=k%d' % (cl, ','.join(kvs), k))
time.sleep(.01)
cluster.flush()
for k in xrange(0, nb_keys):
kvs = [ "c%02d=value%d" % (i*5, i*20) for i in xrange(0, 20) ]
cursor.execute('UPDATE cf USING CONSISTENCY %s SET %s WHERE key=k%d' % (cl, ','.join(kvs), k))
time.sleep(.01)
cluster.flush()
def _validate_row(cluster, res):
if cluster.version() >= "1.2":
assert len(res) == 100, len(res)
for i in xrange(0, 100):
if i % 5 == 0:
assert res[i][2] == 'value%d' % (i*4), 'for %d, expecting value%d, got %s' % (i, i*4, res[i][2])
elif i % 2 == 0:
assert res[i][2] == 'value%d' % (i*2), 'for %d, expecting value%d, got %s' % (i, i*2, res[i][2])
else:
assert res[i][2] == 'value%d' % i, 'for %d, expecting value%d, got %s' % (i, i, res[i][2])
else:
assert len(res) == 100, len(res)
for i in xrange(0, 100):
if i % 5 == 0:
assert res[i] == 'value%d' % (i*4), 'for %d, expecting value%d, got %s' % (i, i*4, res[i])
elif i % 2 == 0:
assert res[i] == 'value%d' % (i*2), 'for %d, expecting value%d, got %s' % (i, i*2, res[i])
else:
assert res[i] == 'value%d' % i, 'for %d, expecting value%d, got %s' % (i, i, res[i])
# Simple puts and get (on one row), testing both reads by names and by slice,
# with overwrites and flushes between inserts to make sure we hit multiple
# sstables on reads
def putget(cluster, cursor, cl="QUORUM"):
_put_with_overwrite(cluster, cursor, 1, cl)
# reads by name
ks = [ "\'c%02d\'" % i for i in xrange(0, 100) ]
# We do not support proper IN queries yet
#if cluster.version() >= "1.2":
# cursor.execute('SELECT * FROM cf USING CONSISTENCY %s WHERE key=\'k0\' AND c IN (%s)' % (cl, ','.join(ks)))
#else:
# cursor.execute('SELECT %s FROM cf USING CONSISTENCY %s WHERE key=\'k0\'' % (','.join(ks), cl))
#_validate_row(cluster, cursor)
if cluster.version() < "1.2":
cursor.execute('SELECT %s FROM cf USING CONSISTENCY %s WHERE key=\'k0\'' % (','.join(ks), cl))
assert cursor.rowcount == 1
res = cursor.fetchone() #[1:] # removing key
_validate_row(cluster, res)
# slice reads
if cluster.version() >= "1.2":
cursor.execute('SELECT * FROM cf WHERE key=\'k0\'', consistency_level=cl)
_validate_row(cluster, cursor.fetchall())
else:
cursor.execute('SELECT * FROM cf USING CONSISTENCY %s WHERE key=\'k0\'' % cl)
_validate_row(cluster, cursor.fetchone()[1:])
# Simple puts and range gets, with overwrites and flushes between inserts to
# make sure we hit multiple sstables on reads
def range_putget(cluster, cursor, cl="QUORUM"):
keys = 100
_put_with_overwrite(cluster, cursor, keys, cl)
if cluster.version() >= "1.2":
cursor.execute('SELECT * FROM cf LIMIT 10000000')
else:
cursor.execute('SELECT * FROM cf USING CONSISTENCY %s LIMIT 10000000' % cl)
if cluster.version() >= "1.2":
assert cursor.rowcount == keys * 100, cursor.rowcount
for k in xrange(0, keys):
res = cursor.fetchmany(100)
_validate_row(cluster, res)
else:
assert cursor.rowcount == keys
for res in cursor:
res = res[1:] # removing key
_validate_row(cluster, res)
class since(object):
def __init__(self, cass_version, max_version=None):
self.cass_version = LooseVersion(cass_version)
self.max_version = max_version
if self.max_version is not None:
self.max_version = LooseVersion(self.max_version)
def __call__(self, f):
def wrapped(obj):
cluster_version = LooseVersion(obj.cluster.version())
if cluster_version < self.cass_version:
obj.skip("%s < %s" % (cluster_version, self.cass_version))
if self.max_version and \
cluster_version[:len(self.max_version)] > self.max_version:
obj.skip("%s > %s" %(cluster_version, self.max_version))
f(obj)
wrapped.__name__ = f.__name__
wrapped.__doc__ = f.__doc__
return wrapped
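# Illustrative sketch (not part of the original module): guarding a dtest with
# the version decorator defined above; TestExample is a hypothetical class.
#
#   class TestExample(Tester):
#       @since('1.2', max_version='2.0')
#       def cql3_only_test(self):
#           ...  # skipped when the cluster version is below 1.2 or above 2.0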
from dtest import ENABLE_VNODES
# Use this decorator to skip a test when vnodes are enabled.
class no_vnodes(object):
def __call__(self, f):
def wrapped(obj):
if ENABLE_VNODES:
obj.skip("Test disabled for vnodes")
f(obj)
wrapped.__name__ = f.__name__
wrapped.__doc__ = f.__doc__
return wrapped
class require(object):
def __init__(self, msg):
self.msg = msg
def __call__(self, f):
def wrapped(obj):
obj.skip("require " + self.msg)
f(obj)
wrapped.__name__ = f.__name__
wrapped.__doc__ = f.__doc__
return wrapped
def not_implemented(f):
def wrapped(obj):
obj.skip("this test not implemented")
f(obj)
wrapped.__name__ = f.__name__
wrapped.__doc__ = f.__doc__
return wrapped
class ThriftConnection(object):
"""
A thrift connection. For when CQL doesn't do what we need.
"""
def __init__(self, node=None, host=None, port=None, ks_name='ks', cf_name='cf',
cassandra_interface='11'):
"""
initializes the connection.
- node: a ccm node. If supplied, the host and port, and cassandra_interface
will be pulled from the node.
- host, port: overwritten if node is supplied
- ks_name, cf_name: all operations are done on the supplied ks and cf
- cassandra_interface: '07' and '11' are currently supported. This is the
thrift interface to cassandra. '11' suffices for now except when creating
keyspaces against Cassandra 0.7, in which case '07' must be used.
"""
if node:
host, port = node.network_interfaces['thrift']
if re.findall('0\.7\.\d+', node.get_cassandra_dir()):
cassandra_interface='07'
self.node = node
self.host = host
self.port = port
self.cassandra_interface = cassandra_interface
# import the correct version of the cassandra thrift interface
# and set self.Cassandra as the imported module
module_name = 'cassandra.v%s' % cassandra_interface
imp = __import__(module_name, globals(), locals(), ['Cassandra'])
self.Cassandra = imp.Cassandra
socket = TSocket.TSocket(host, port)
self.transport = TTransport.TFramedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocolAccelerated(self.transport)
self.client = self.Cassandra.Client(protocol)
socket.open()
self.open_socket = True
self.ks_name = ks_name
self.cf_name = cf_name
def create_ks(self, replication_factor=1):
if self.cassandra_interface == '07':
ks_def = self.Cassandra.KsDef(name=self.ks_name,
strategy_class='org.apache.cassandra.locator.SimpleStrategy',
replication_factor=int(replication_factor),
cf_defs=[])
else:
ks_def = self.Cassandra.KsDef(name=self.ks_name,
strategy_class='org.apache.cassandra.locator.SimpleStrategy',
strategy_options={'replication_factor': str(replication_factor)},
cf_defs=[])
retry_till_success(self.client.system_add_keyspace, ks_def, timeout=30)
time.sleep(0.5)
retry_till_success(self.wait_for_agreement, timeout=10)
time.sleep(0.5)
self.use_ks()
return self
def use_ks(self):
retry_till_success(self.client.set_keyspace, self.ks_name, timeout=30)
return self
def create_cf(self):
cf_def = self.Cassandra.CfDef(name=self.cf_name, keyspace=self.ks_name)
retry_till_success(self.client.system_add_column_family, cf_def, timeout=30)
time.sleep(0.5)
retry_till_success(self.wait_for_agreement, timeout=10)
time.sleep(0.5)
return self
def wait_for_agreement(self):
schemas = self.client.describe_schema_versions()
if len([ss for ss in schemas.keys() if ss != 'UNREACHABLE']) > 1:
raise Exception("schema agreement not reached")
def _translate_cl(self, cl):
return self.Cassandra.ConsistencyLevel._NAMES_TO_VALUES[cl]
def insert_columns(self, num_rows=10, consistency_level='QUORUM'):
""" Insert some basic values """
cf_parent = self.Cassandra.ColumnParent(column_family=self.cf_name)
for row_key in ('row_%d'%i for i in xrange(num_rows)):
col = self.Cassandra.Column(name='col_0', value='val_0',
timestamp=int(time.time()*1000))
retry_till_success(self.client.insert,
key=row_key, column_parent=cf_parent, column=col,
consistency_level=self._translate_cl(consistency_level),
timeout=30)
return self
def query_columns(self, num_rows=10, consistency_level='QUORUM'):
""" Check that the values inserted in insert_columns() are present """
for row_key in ('row_%d'%i for i in xrange(num_rows)):
cpath = self.Cassandra.ColumnPath(column_family=self.cf_name,
column='col_0')
cosc = retry_till_success(self.client.get, key=row_key, column_path=cpath,
consistency_level=self._translate_cl(consistency_level),
timeout=30)
col = cosc.column
value = col.value
assert value == 'val_0', "column did not have the same value that was inserted!"
return self
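# Illustrative sketch (not part of the original module): a typical ThriftConnection
# call sequence against a ccm node; `node1` is a hypothetical node object. Each
# method returns self, so the calls chain.
#
#   tc = ThriftConnection(node1, ks_name='ks', cf_name='cf')
#   tc.create_ks(replication_factor=2).create_cf()
#   tc.insert_columns(num_rows=10).query_columns(num_rows=10)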
``` |
{
"source": "josh-mckenzie/python-driver",
"score": 2
} |
#### File: python-driver/cassandra/pool.py
```python
import logging
import socket
import time
from threading import Lock, RLock, Condition
import weakref
try:
from weakref import WeakSet
except ImportError:
from cassandra.util import WeakSet # NOQA
from cassandra import AuthenticationFailed
from cassandra.connection import ConnectionException
from cassandra.policies import HostDistance
log = logging.getLogger(__name__)
class NoConnectionsAvailable(Exception):
"""
All existing connections to a given host are busy, or there are
no open connections.
"""
pass
class Host(object):
"""
Represents a single Cassandra node.
"""
address = None
"""
The IP address or hostname of the node.
"""
conviction_policy = None
"""
A :class:`~.ConvictionPolicy` instance for determining when this node should
be marked up or down.
"""
is_up = None
"""
:const:`True` if the node is considered up, :const:`False` if it is
considered down, and :const:`None` if it is not known if the node is
up or down.
"""
_datacenter = None
_rack = None
_reconnection_handler = None
lock = None
_currently_handling_node_up = False
def __init__(self, inet_address, conviction_policy_factory, datacenter=None, rack=None):
if inet_address is None:
raise ValueError("inet_address may not be None")
if conviction_policy_factory is None:
raise ValueError("conviction_policy_factory may not be None")
self.address = inet_address
self.conviction_policy = conviction_policy_factory(self)
self.set_location_info(datacenter, rack)
self.lock = RLock()
@property
def datacenter(self):
""" The datacenter the node is in. """
return self._datacenter
@property
def rack(self):
""" The rack the node is in. """
return self._rack
def set_location_info(self, datacenter, rack):
"""
Sets the datacenter and rack for this node. Intended for internal
use (by the control connection, which periodically checks the
ring topology) only.
"""
self._datacenter = datacenter
self._rack = rack
def set_up(self):
if not self.is_up:
log.debug("Host %s is now marked up", self.address)
self.conviction_policy.reset()
self.is_up = True
def set_down(self):
self.is_up = False
def signal_connection_failure(self, connection_exc):
return self.conviction_policy.add_failure(connection_exc)
def is_currently_reconnecting(self):
return self._reconnection_handler is not None
def get_and_set_reconnection_handler(self, new_handler):
"""
Atomically replaces the reconnection handler for this
host. Intended for internal use only.
"""
with self.lock:
old = self._reconnection_handler
self._reconnection_handler = new_handler
return old
def __eq__(self, other):
return self.address == other.address
def __hash__(self):
return hash(self.address)
def __lt__(self, other):
return self.address < other.address
def __str__(self):
return str(self.address)
def __repr__(self):
dc = (" %s" % (self._datacenter,)) if self._datacenter else ""
return "<%s: %s%s>" % (self.__class__.__name__, self.address, dc)
class _ReconnectionHandler(object):
"""
Abstract class for attempting reconnections with a given
schedule and scheduler.
"""
_cancelled = False
def __init__(self, scheduler, schedule, callback, *callback_args, **callback_kwargs):
self.scheduler = scheduler
self.schedule = schedule
self.callback = callback
self.callback_args = callback_args
self.callback_kwargs = callback_kwargs
def start(self):
if self._cancelled:
log.debug("Reconnection handler was cancelled before starting")
return
first_delay = next(self.schedule)
self.scheduler.schedule(first_delay, self.run)
def run(self):
if self._cancelled:
return
conn = None
try:
conn = self.try_reconnect()
except Exception as exc:
try:
next_delay = next(self.schedule)
except StopIteration:
# the schedule has been exhausted
next_delay = None
# call on_exception for logging purposes even if next_delay is None
if self.on_exception(exc, next_delay):
if next_delay is None:
log.warn(
"Will not continue to retry reconnection attempts "
"due to an exhausted retry schedule")
else:
self.scheduler.schedule(next_delay, self.run)
else:
if not self._cancelled:
self.on_reconnection(conn)
self.callback(*(self.callback_args), **(self.callback_kwargs))
finally:
if conn:
conn.close()
def cancel(self):
self._cancelled = True
def try_reconnect(self):
"""
Subclasses must implement this method. It should attempt to
open a new Connection and return it; if a failure occurs, an
Exception should be raised.
"""
raise NotImplementedError()
def on_reconnection(self, connection):
"""
Called when a new Connection is successfully opened. Nothing is
done by default.
"""
pass
def on_exception(self, exc, next_delay):
"""
Called when an Exception is raised when trying to connect.
`exc` is the Exception that was raised and `next_delay` is the
number of seconds (as a float) that the handler will wait before
attempting to connect again.
Subclasses should return :const:`False` if no more attempts to
connection should be made, :const:`True` otherwise. The default
behavior is to always retry unless the error is an
:exc:`.AuthenticationFailed` instance.
"""
if isinstance(exc, AuthenticationFailed):
return False
else:
return True
class _HostReconnectionHandler(_ReconnectionHandler):
def __init__(self, host, connection_factory, is_host_addition, on_add, on_up, *args, **kwargs):
_ReconnectionHandler.__init__(self, *args, **kwargs)
self.is_host_addition = is_host_addition
self.on_add = on_add
self.on_up = on_up
self.host = host
self.connection_factory = connection_factory
def try_reconnect(self):
return self.connection_factory()
def on_reconnection(self, connection):
log.info("Successful reconnection to %s, marking node up if it isn't already", self.host)
if self.is_host_addition:
self.on_add(self.host)
else:
self.on_up(self.host)
def on_exception(self, exc, next_delay):
if isinstance(exc, AuthenticationFailed):
return False
else:
log.warning("Error attempting to reconnect to %s, scheduling retry in %s seconds: %s",
self.host, next_delay, exc)
log.debug("Reconnection error details", exc_info=True)
return True
class HostConnection(object):
"""
When using v3 of the native protocol, this is used instead of a connection
pool per host (HostConnectionPool) due to the increased in-flight capacity
of individual connections.
"""
host = None
host_distance = None
is_shutdown = False
_session = None
_connection = None
_lock = None
_is_replacing = False
def __init__(self, host, host_distance, session):
self.host = host
self.host_distance = host_distance
self._session = weakref.proxy(session)
self._lock = Lock()
if host_distance == HostDistance.IGNORED:
log.debug("Not opening connection to ignored host %s", self.host)
return
elif host_distance == HostDistance.REMOTE and not session.cluster.connect_to_remote_hosts:
log.debug("Not opening connection to remote host %s", self.host)
return
log.debug("Initializing connection for host %s", self.host)
self._connection = session.cluster.connection_factory(host.address)
if session.keyspace:
self._connection.set_keyspace_blocking(session.keyspace)
log.debug("Finished initializing connection for host %s", self.host)
def borrow_connection(self, timeout):
if self.is_shutdown:
raise ConnectionException(
"Pool for %s is shutdown" % (self.host,), self.host)
conn = self._connection
if not conn:
raise NoConnectionsAvailable()
with conn.lock:
if conn.in_flight < conn.max_request_id:
conn.in_flight += 1
return conn, conn.get_request_id()
raise NoConnectionsAvailable("All request IDs are currently in use")
def return_connection(self, connection):
with connection.lock:
connection.in_flight -= 1
if connection.is_defunct or connection.is_closed:
log.debug("Defunct or closed connection (%s) returned to pool, potentially "
"marking host %s as down", id(connection), self.host)
is_down = self._session.cluster.signal_connection_failure(
self.host, connection.last_error, is_host_addition=False)
if is_down:
self.shutdown()
else:
self._connection = None
with self._lock:
if self._is_replacing:
return
self._is_replacing = True
self._session.submit(self._replace, connection)
def _replace(self, connection):
log.debug("Replacing connection (%s) to %s", id(connection), self.host)
conn = self._session.cluster.connection_factory(self.host.address)
if self._session.keyspace:
conn.set_keyspace_blocking(self._session.keyspace)
self._connection = conn
with self._lock:
self._is_replacing = False
def shutdown(self):
with self._lock:
if self.is_shutdown:
return
else:
self.is_shutdown = True
if self._connection:
self._connection.close()
def _set_keyspace_for_all_conns(self, keyspace, callback):
if self.is_shutdown or not self._connection:
return
def connection_finished_setting_keyspace(conn, error):
errors = [] if not error else [error]
callback(self, errors)
self._connection.set_keyspace_async(keyspace, connection_finished_setting_keyspace)
def get_state(self):
have_conn = self._connection is not None
in_flight = self._connection.in_flight if have_conn else 0
return "shutdown: %s, open: %s, in_flights: %s" % (self.is_shutdown, have_conn, in_flight)
_MAX_SIMULTANEOUS_CREATION = 1
_MIN_TRASH_INTERVAL = 10
class HostConnectionPool(object):
"""
Used to pool connections to a host for v1 and v2 native protocol.
"""
host = None
host_distance = None
is_shutdown = False
open_count = 0
_scheduled_for_creation = 0
_next_trash_allowed_at = 0
def __init__(self, host, host_distance, session):
self.host = host
self.host_distance = host_distance
self._session = weakref.proxy(session)
self._lock = RLock()
self._conn_available_condition = Condition()
log.debug("Initializing new connection pool for host %s", self.host)
core_conns = session.cluster.get_core_connections_per_host(host_distance)
self._connections = [session.cluster.connection_factory(host.address)
for i in range(core_conns)]
if session.keyspace:
for conn in self._connections:
conn.set_keyspace_blocking(session.keyspace)
self._trash = set()
self._next_trash_allowed_at = time.time()
self.open_count = core_conns
log.debug("Finished initializing new connection pool for host %s", self.host)
def borrow_connection(self, timeout):
if self.is_shutdown:
raise ConnectionException(
"Pool for %s is shutdown" % (self.host,), self.host)
conns = self._connections
if not conns:
# handled specially just for simpler code
log.debug("Detected empty pool, opening core conns to %s", self.host)
core_conns = self._session.cluster.get_core_connections_per_host(self.host_distance)
with self._lock:
# we check the length of self._connections again
# along with self._scheduled_for_creation while holding the lock
# in case multiple threads hit this condition at the same time
to_create = core_conns - (len(self._connections) + self._scheduled_for_creation)
for i in range(to_create):
self._scheduled_for_creation += 1
self._session.submit(self._create_new_connection)
# in_flight is incremented by wait_for_conn
conn = self._wait_for_conn(timeout)
return conn
else:
# note: it would be nice to push changes to these config settings
# to pools instead of doing a new lookup on every
# borrow_connection() call
max_reqs = self._session.cluster.get_max_requests_per_connection(self.host_distance)
max_conns = self._session.cluster.get_max_connections_per_host(self.host_distance)
least_busy = min(conns, key=lambda c: c.in_flight)
request_id = None
# to avoid another thread closing this connection while
# trashing it (through the return_connection process), hold
# the connection lock from this point until we've incremented
# its in_flight count
need_to_wait = False
with least_busy.lock:
if least_busy.in_flight < least_busy.max_request_id:
least_busy.in_flight += 1
request_id = least_busy.get_request_id()
else:
# once we release the lock, wait for another connection
need_to_wait = True
if need_to_wait:
# wait_for_conn will increment in_flight on the conn
least_busy, request_id = self._wait_for_conn(timeout)
# if we have too many requests on this connection but we still
# have space to open a new connection against this host, go ahead
# and schedule the creation of a new connection
if least_busy.in_flight >= max_reqs and len(self._connections) < max_conns:
self._maybe_spawn_new_connection()
return least_busy, request_id
def _maybe_spawn_new_connection(self):
with self._lock:
if self._scheduled_for_creation >= _MAX_SIMULTANEOUS_CREATION:
return
if self.open_count >= self._session.cluster.get_max_connections_per_host(self.host_distance):
return
self._scheduled_for_creation += 1
log.debug("Submitting task for creation of new Connection to %s", self.host)
self._session.submit(self._create_new_connection)
def _create_new_connection(self):
try:
self._add_conn_if_under_max()
except (ConnectionException, socket.error) as exc:
log.warning("Failed to create new connection to %s: %s", self.host, exc)
except Exception:
log.exception("Unexpectedly failed to create new connection")
finally:
with self._lock:
self._scheduled_for_creation -= 1
def _add_conn_if_under_max(self):
max_conns = self._session.cluster.get_max_connections_per_host(self.host_distance)
with self._lock:
if self.is_shutdown:
return False
if self.open_count >= max_conns:
return False
self.open_count += 1
log.debug("Going to open new connection to host %s", self.host)
try:
conn = self._session.cluster.connection_factory(self.host.address)
if self._session.keyspace:
conn.set_keyspace_blocking(self._session.keyspace)
self._next_trash_allowed_at = time.time() + _MIN_TRASH_INTERVAL
with self._lock:
new_connections = self._connections[:] + [conn]
self._connections = new_connections
log.debug("Added new connection (%s) to pool for host %s, signaling availablility",
id(conn), self.host)
self._signal_available_conn()
return True
except (ConnectionException, socket.error) as exc:
log.warning("Failed to add new connection to pool for host %s: %s", self.host, exc)
with self._lock:
self.open_count -= 1
if self._session.cluster.signal_connection_failure(self.host, exc, is_host_addition=False):
self.shutdown()
return False
except AuthenticationFailed:
with self._lock:
self.open_count -= 1
return False
def _await_available_conn(self, timeout):
with self._conn_available_condition:
self._conn_available_condition.wait(timeout)
def _signal_available_conn(self):
with self._conn_available_condition:
self._conn_available_condition.notify()
def _signal_all_available_conn(self):
with self._conn_available_condition:
self._conn_available_condition.notify_all()
def _wait_for_conn(self, timeout):
start = time.time()
remaining = timeout
while remaining > 0:
# wait on our condition for the possibility that a connection
# is usable
self._await_available_conn(remaining)
# self.shutdown() may trigger the above Condition
if self.is_shutdown:
raise ConnectionException("Pool is shutdown")
conns = self._connections
if conns:
least_busy = min(conns, key=lambda c: c.in_flight)
with least_busy.lock:
if least_busy.in_flight < least_busy.max_request_id:
least_busy.in_flight += 1
return least_busy, least_busy.get_request_id()
remaining = timeout - (time.time() - start)
raise NoConnectionsAvailable()
def return_connection(self, connection):
with connection.lock:
connection.in_flight -= 1
in_flight = connection.in_flight
if connection.is_defunct or connection.is_closed:
log.debug("Defunct or closed connection (%s) returned to pool, potentially "
"marking host %s as down", id(connection), self.host)
is_down = self._session.cluster.signal_connection_failure(
self.host, connection.last_error, is_host_addition=False)
if is_down:
self.shutdown()
else:
self._replace(connection)
else:
if connection in self._trash:
with connection.lock:
if connection.in_flight == 0:
with self._lock:
if connection in self._trash:
self._trash.remove(connection)
log.debug("Closing trashed connection (%s) to %s", id(connection), self.host)
connection.close()
return
core_conns = self._session.cluster.get_core_connections_per_host(self.host_distance)
min_reqs = self._session.cluster.get_min_requests_per_connection(self.host_distance)
# we can use in_flight here without holding the connection lock
# because the fact that in_flight dipped below the min at some
# point is enough to start the trashing procedure
if len(self._connections) > core_conns and in_flight <= min_reqs and \
time.time() >= self._next_trash_allowed_at:
self._maybe_trash_connection(connection)
else:
self._signal_available_conn()
def _maybe_trash_connection(self, connection):
core_conns = self._session.cluster.get_core_connections_per_host(self.host_distance)
did_trash = False
with self._lock:
if connection not in self._connections:
return
if self.open_count > core_conns:
did_trash = True
self.open_count -= 1
new_connections = self._connections[:]
new_connections.remove(connection)
self._connections = new_connections
with connection.lock:
if connection.in_flight == 0:
log.debug("Skipping trash and closing unused connection (%s) to %s", id(connection), self.host)
connection.close()
# skip adding it to the trash if we're already closing it
return
self._trash.add(connection)
if did_trash:
self._next_trash_allowed_at = time.time() + _MIN_TRASH_INTERVAL
log.debug("Trashed connection (%s) to %s", id(connection), self.host)
def _replace(self, connection):
should_replace = False
with self._lock:
if connection in self._connections:
new_connections = self._connections[:]
new_connections.remove(connection)
self._connections = new_connections
self.open_count -= 1
should_replace = True
if should_replace:
log.debug("Replacing connection (%s) to %s", id(connection), self.host)
def close_and_replace():
connection.close()
self._add_conn_if_under_max()
self._session.submit(close_and_replace)
else:
# just close it
log.debug("Closing connection (%s) to %s", id(connection), self.host)
connection.close()
def shutdown(self):
with self._lock:
if self.is_shutdown:
return
else:
self.is_shutdown = True
self._signal_all_available_conn()
for conn in self._connections:
conn.close()
self.open_count -= 1
for conn in self._trash:
conn.close()
def ensure_core_connections(self):
if self.is_shutdown:
return
core_conns = self._session.cluster.get_core_connections_per_host(self.host_distance)
with self._lock:
to_create = core_conns - (len(self._connections) + self._scheduled_for_creation)
for i in range(to_create):
self._scheduled_for_creation += 1
self._session.submit(self._create_new_connection)
def _set_keyspace_for_all_conns(self, keyspace, callback):
"""
Asynchronously sets the keyspace for all connections. When all
connections have been set, `callback` will be called with two
arguments: this pool, and a list of any errors that occurred.
"""
remaining_callbacks = set(self._connections)
errors = []
if not remaining_callbacks:
callback(self, errors)
return
def connection_finished_setting_keyspace(conn, error):
remaining_callbacks.remove(conn)
if error:
errors.append(error)
if not remaining_callbacks:
callback(self, errors)
for conn in self._connections:
conn.set_keyspace_async(keyspace, connection_finished_setting_keyspace)
def get_state(self):
in_flights = ", ".join([str(c.in_flight) for c in self._connections])
return "shutdown: %s, open_count: %d, in_flights: %s" % (self.is_shutdown, self.open_count, in_flights)
```
#### File: tests/integration/datatype_utils.py
```python
from decimal import Decimal
import datetime
from uuid import UUID
import pytz
try:
from blist import sortedset
except ImportError:
sortedset = set # noqa
DATA_TYPE_PRIMITIVES = [
'ascii',
'bigint',
'blob',
'boolean',
# 'counter', counters are not allowed inside tuples
'decimal',
'double',
'float',
'inet',
'int',
'text',
'timestamp',
'timeuuid',
'uuid',
'varchar',
'varint',
]
DATA_TYPE_NON_PRIMITIVE_NAMES = [
'list',
'set',
'map',
'tuple'
]
def get_sample_data():
"""
Create a standard set of sample inputs for testing.
"""
sample_data = {}
for datatype in DATA_TYPE_PRIMITIVES:
if datatype == 'ascii':
sample_data[datatype] = 'ascii'
elif datatype == 'bigint':
sample_data[datatype] = 2 ** 63 - 1
elif datatype == 'blob':
sample_data[datatype] = bytearray(b'hello world')
elif datatype == 'boolean':
sample_data[datatype] = True
elif datatype == 'counter':
# Not supported in an insert statement
pass
elif datatype == 'decimal':
sample_data[datatype] = Decimal('12.3E+7')
elif datatype == 'double':
sample_data[datatype] = 1.23E+8
elif datatype == 'float':
sample_data[datatype] = 3.4028234663852886e+38
elif datatype == 'inet':
sample_data[datatype] = '172.16.58.3'
elif datatype == 'int':
sample_data[datatype] = 2147483647
elif datatype == 'text':
sample_data[datatype] = 'text'
elif datatype == 'timestamp':
sample_data[datatype] = datetime.datetime.fromtimestamp(872835240, tz=pytz.timezone('America/New_York')).astimezone(pytz.UTC).replace(tzinfo=None)
elif datatype == 'timeuuid':
sample_data[datatype] = UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66')
elif datatype == 'uuid':
sample_data[datatype] = UUID('067e6162-3b6f-4ae2-a171-2470b63dff00')
elif datatype == 'varchar':
sample_data[datatype] = 'varchar'
elif datatype == 'varint':
sample_data[datatype] = int(str(2147483647) + '000')
else:
raise Exception('Missing handling of %s.' % datatype)
return sample_data
SAMPLE_DATA = get_sample_data()
def get_sample(datatype):
"""
Helper method to access created sample data
"""
return SAMPLE_DATA[datatype]
def get_nonprim_sample(non_prim_type, datatype):
"""
Helper method to access created sample data for non-primitives
"""
if non_prim_type == 'list':
return [get_sample(datatype), get_sample(datatype)]
elif non_prim_type == 'set':
return sortedset([get_sample(datatype)])
elif non_prim_type == 'map':
if datatype == 'blob':
return {get_sample('ascii'): get_sample(datatype)}
else:
return {get_sample(datatype): get_sample(datatype)}
elif non_prim_type == 'tuple':
return (get_sample(datatype),)
else:
raise Exception('Missing handling of non-primitive type {0}.'.format(non_prim_type))
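# Illustrative sketch (not part of the original module): what the helpers above
# yield for a few type combinations, given the SAMPLE_DATA built at import time.
#
#   get_sample('int')                      # -> 2147483647
#   get_nonprim_sample('list', 'boolean')  # -> [True, True]
#   get_nonprim_sample('map', 'blob')      # -> {'ascii': bytearray(b'hello world')}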
```
#### File: integration/long/test_large_data.py
```python
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty # noqa
from struct import pack
import unittest
from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.query import dict_factory
from cassandra.query import SimpleStatement
from tests.integration import PROTOCOL_VERSION
from tests.integration.long.utils import create_schema
# Converts an integer to a string of column-name letters ('a'-'j'), least-significant digit first
def create_column_name(i):
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
column_name = ''
while True:
column_name += letters[i % 10]
i = i // 10
if not i:
break
if column_name == 'if':
column_name = 'special_case'
return column_name
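# Illustrative mapping (not part of the original test): a few values produced by
# create_column_name, built least-significant digit first.
#
#   create_column_name(0)   # -> 'a'
#   create_column_name(12)  # -> 'cb'  (digit 2 -> 'c', then digit 1 -> 'b')
#   create_column_name(58)  # -> 'special_case'  (would otherwise collide with 'if')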
class LargeDataTests(unittest.TestCase):
def setUp(self):
self.keyspace = 'large_data'
def make_session_and_keyspace(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
session.default_timeout = 20.0 # increase the default timeout
session.row_factory = dict_factory
create_schema(session, self.keyspace)
return session
def batch_futures(self, session, statement_generator):
concurrency = 10
futures = Queue(maxsize=concurrency)
for i, statement in enumerate(statement_generator):
if i > 0 and i % (concurrency - 1) == 0:
# clear the existing queue
while True:
try:
futures.get_nowait().result()
except Empty:
break
future = session.execute_async(statement)
futures.put_nowait(future)
while True:
try:
futures.get_nowait().result()
except Empty:
break
def test_wide_rows(self):
table = 'wide_rows'
session = self.make_session_and_keyspace()
session.execute('CREATE TABLE %s (k INT, i INT, PRIMARY KEY(k, i))' % table)
prepared = session.prepare('INSERT INTO %s (k, i) VALUES (0, ?)' % (table, ))
# Write via async futures
self.batch_futures(session, (prepared.bind((i, )) for i in range(100000)))
# Read
results = session.execute('SELECT i FROM %s WHERE k=0' % (table, ))
# Verify
for i, row in enumerate(results):
self.assertEqual(row['i'], i)
def test_wide_batch_rows(self):
table = 'wide_batch_rows'
session = self.make_session_and_keyspace()
session.execute('CREATE TABLE %s (k INT, i INT, PRIMARY KEY(k, i))' % table)
# Write
statement = 'BEGIN BATCH '
for i in range(2000):
statement += 'INSERT INTO %s (k, i) VALUES (%s, %s) ' % (table, 0, i)
statement += 'APPLY BATCH'
statement = SimpleStatement(statement, consistency_level=ConsistencyLevel.QUORUM)
session.execute(statement)
# Read
results = session.execute('SELECT i FROM %s WHERE k=%s' % (table, 0))
# Verify
for i, row in enumerate(results):
self.assertEqual(row['i'], i)
def test_wide_byte_rows(self):
table = 'wide_byte_rows'
session = self.make_session_and_keyspace()
session.execute('CREATE TABLE %s (k INT, i INT, v BLOB, PRIMARY KEY(k, i))' % table)
prepared = session.prepare('INSERT INTO %s (k, i, v) VALUES (0, ?, 0xCAFE)' % (table, ))
# Write
self.batch_futures(session, (prepared.bind((i, )) for i in range(100000)))
# Read
results = session.execute('SELECT i, v FROM %s WHERE k=0' % (table, ))
# Verify
bb = pack('>H', 0xCAFE)
for row in results:
self.assertEqual(row['v'], bb)
def test_large_text(self):
table = 'large_text'
session = self.make_session_and_keyspace()
session.execute('CREATE TABLE %s (k int PRIMARY KEY, txt text)' % table)
# Create ultra-long text
text = 'a' * 1000000
# Write
session.execute(SimpleStatement("INSERT INTO %s (k, txt) VALUES (%s, '%s')"
% (table, 0, text),
consistency_level=ConsistencyLevel.QUORUM))
# Read
result = session.execute('SELECT * FROM %s WHERE k=%s' % (table, 0))
# Verify
for row in result:
self.assertEqual(row['txt'], text)
def test_wide_table(self):
table = 'wide_table'
table_width = 330
session = self.make_session_and_keyspace()
table_declaration = 'CREATE TABLE %s (key INT PRIMARY KEY, '
table_declaration += ' INT, '.join(create_column_name(i) for i in range(table_width))
table_declaration += ' INT)'
session.execute(table_declaration % table)
# Write
insert_statement = 'INSERT INTO %s (key, '
insert_statement += ', '.join(create_column_name(i) for i in range(table_width))
insert_statement += ') VALUES (%s, '
insert_statement += ', '.join(str(i) for i in range(table_width))
insert_statement += ')'
insert_statement = insert_statement % (table, 0)
session.execute(SimpleStatement(insert_statement, consistency_level=ConsistencyLevel.QUORUM))
# Read
result = session.execute('SELECT * FROM %s WHERE key=%s' % (table, 0))
# Verify
for row in result:
for i in range(table_width):
self.assertEqual(row[create_column_name(i)], i)
```
#### File: integration/long/test_schema.py
```python
import logging
from cassandra import ConsistencyLevel, OperationTimedOut
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement
from tests.integration import PROTOCOL_VERSION
from six.moves import xrange  # noqa
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
log = logging.getLogger(__name__)
class SchemaTests(unittest.TestCase):
def test_recreates(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
replication_factor = 3
for i in range(2):
for keyspace in range(5):
keyspace = 'ks_%s' % keyspace
results = session.execute('SELECT keyspace_name FROM system.schema_keyspaces')
existing_keyspaces = [row[0] for row in results]
if keyspace in existing_keyspaces:
ddl = 'DROP KEYSPACE %s' % keyspace
log.debug(ddl)
session.execute(ddl)
ddl = """
CREATE KEYSPACE %s
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '%s'}
""" % (keyspace, str(replication_factor))
log.debug(ddl)
session.execute(ddl)
ddl = 'CREATE TABLE %s.cf (k int PRIMARY KEY, i int)' % keyspace
log.debug(ddl)
session.execute(ddl)
statement = 'USE %s' % keyspace
log.debug(statement)
session.execute(statement)
statement = 'INSERT INTO %s(k, i) VALUES (0, 0)' % 'cf'
log.debug(statement)
ss = SimpleStatement(statement,
consistency_level=ConsistencyLevel.QUORUM)
session.execute(ss)
def test_for_schema_disagreements_different_keyspaces(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
for i in xrange(30):
try:
session.execute('''
CREATE KEYSPACE test_%s
WITH replication = {'class': 'SimpleStrategy',
'replication_factor': 1}
''' % i)
session.execute('''
CREATE TABLE test_%s.cf (
key int,
value int,
PRIMARY KEY (key))
''' % i)
for j in xrange(100):
session.execute('INSERT INTO test_%s.cf (key, value) VALUES (%s, %s)' % (i, j, j))
session.execute('''
DROP KEYSPACE test_%s
''' % i)
except OperationTimedOut: pass
def test_for_schema_disagreements_same_keyspace(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
for i in xrange(30):
try:
session.execute('''
CREATE KEYSPACE test
WITH replication = {'class': 'SimpleStrategy',
'replication_factor': 1}
''')
session.execute('''
CREATE TABLE test.cf (
key int,
value int,
PRIMARY KEY (key))
''')
for j in xrange(100):
session.execute('INSERT INTO test.cf (key, value) VALUES (%s, %s)' % (j, j))
session.execute('''
DROP KEYSPACE test
''')
except OperationTimedOut: pass
```
#### File: tests/unit/test_parameter_binding.py
```python
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from cassandra.encoder import Encoder
from cassandra.query import bind_params, ValueSequence
from cassandra.query import PreparedStatement, BoundStatement
from cassandra.cqltypes import Int32Type
from cassandra.util import OrderedDict
from six.moves import xrange
class ParamBindingTest(unittest.TestCase):
def test_bind_sequence(self):
result = bind_params("%s %s %s", (1, "a", 2.0), Encoder())
self.assertEqual(result, "1 'a' 2.0")
def test_bind_map(self):
result = bind_params("%(a)s %(b)s %(c)s", dict(a=1, b="a", c=2.0), Encoder())
self.assertEqual(result, "1 'a' 2.0")
def test_sequence_param(self):
result = bind_params("%s", (ValueSequence((1, "a", 2.0)),), Encoder())
self.assertEqual(result, "( 1 , 'a' , 2.0 )")
def test_generator_param(self):
result = bind_params("%s", ((i for i in xrange(3)),), Encoder())
self.assertEqual(result, "[ 0 , 1 , 2 ]")
def test_none_param(self):
result = bind_params("%s", (None,), Encoder())
self.assertEqual(result, "NULL")
def test_list_collection(self):
result = bind_params("%s", (['a', 'b', 'c'],), Encoder())
self.assertEqual(result, "[ 'a' , 'b' , 'c' ]")
def test_set_collection(self):
result = bind_params("%s", (set(['a', 'b']),), Encoder())
self.assertIn(result, ("{ 'a' , 'b' }", "{ 'b' , 'a' }"))
def test_map_collection(self):
vals = OrderedDict()
vals['a'] = 'a'
vals['b'] = 'b'
vals['c'] = 'c'
result = bind_params("%s", (vals,), Encoder())
self.assertEqual(result, "{ 'a' : 'a' , 'b' : 'b' , 'c' : 'c' }")
def test_quote_escaping(self):
result = bind_params("%s", ("""'ef''ef"ef""ef'""",), Encoder())
self.assertEqual(result, """'''ef''''ef"ef""ef'''""")
class BoundStatementTestCase(unittest.TestCase):
def test_invalid_argument_type(self):
keyspace = 'keyspace1'
column_family = 'cf1'
column_metadata = [
(keyspace, column_family, 'foo1', Int32Type),
(keyspace, column_family, 'foo2', Int32Type)
]
prepared_statement = PreparedStatement(column_metadata=column_metadata,
query_id=None,
routing_key_indexes=[],
query=None,
keyspace=keyspace,
protocol_version=2)
bound_statement = BoundStatement(prepared_statement=prepared_statement)
values = ['nonint', 1]
try:
bound_statement.bind(values)
except TypeError as e:
self.assertIn('foo1', str(e))
self.assertIn('Int32Type', str(e))
self.assertIn('str', str(e))
else:
self.fail('Passed invalid type but exception was not thrown')
values = [1, ['1', '2']]
try:
bound_statement.bind(values)
except TypeError as e:
self.assertIn('foo2', str(e))
self.assertIn('Int32Type', str(e))
self.assertIn('list', str(e))
else:
self.fail('Passed invalid type but exception was not thrown')
def test_inherit_fetch_size(self):
keyspace = 'keyspace1'
column_family = 'cf1'
column_metadata = [
(keyspace, column_family, 'foo1', Int32Type),
(keyspace, column_family, 'foo2', Int32Type)
]
prepared_statement = PreparedStatement(column_metadata=column_metadata,
query_id=None,
routing_key_indexes=[],
query=None,
keyspace=keyspace,
protocol_version=2,
fetch_size=1234)
bound_statement = BoundStatement(prepared_statement=prepared_statement)
self.assertEqual(1234, bound_statement.fetch_size)
``` |
{
"source": "joshmckinney/appsaway",
"score": 2
} |
#### File: backend/tests/test_forms.py
```python
import datetime
from django.contrib.auth.models import User
from django.test import TestCase
from backend.models import Company
from backend.forms import Company as CompanyForm
from backend.forms import ContractApplication as ContractForm
from backend.forms import FreelanceApplication as FreelanceForm
from backend.forms import PermanentApplication as PermanentForm
class TestCompanyForm(TestCase):
def setUp(self):
self.user = User.objects.create_user(username="Test User", password="Password")
def test_company_form(self):
form_data = {
"company_name": "Test Company",
"company_notes": "Some notes",
"contact_name": "Some Contact",
"contact_phone": "(555) 123-4567",
"contact_email": "<EMAIL>",
"user": self.user
}
form = CompanyForm(data=form_data)
self.assertTrue(form.is_valid())
class TestContractApplicationForm(TestCase):
def setUp(self):
self.user = User.objects.create_user(username="Test User", password="Password")
self.company = Company.objects.create(company_name="Test Company", user=self.user)
def test_contract_form(self):
form_data = {
"company": self.company.pk,
"user": self.user.pk,
"app_job_title": "Test Job",
"app_date": datetime.date.today(),
"followup_date": datetime.date.today(),
"interview_date": datetime.date.today(),
"app_notes": "Some job notes",
"status": "Hired",
"contract_start": datetime.date.today(),
"contract_end": datetime.date.today()
}
form = ContractForm(data=form_data)
self.assertTrue(form.is_valid())
class TestFreelanceApplicationForm(TestCase):
def setUp(self):
self.user = User.objects.create_user(username="Test User", password="Password")
self.company = Company.objects.create(company_name="Test Company", user=self.user)
def test_freelance_form(self):
form_data = {
"company": self.company.pk,
"user": self.user.pk,
"app_job_title": "Test Job",
"app_date": datetime.date.today(),
"followup_date": datetime.date.today(),
"interview_date": datetime.date.today(),
"app_notes": "Some job notes",
"status": "Hired",
"freelance_details": "Another Gig",
"freelance_bid": 25.00
}
form = FreelanceForm(data=form_data)
self.assertTrue(form.is_valid())
class TestPermanentApplicationForm(TestCase):
def setUp(self):
self.user = User.objects.create_user(username="Test User", password="Password")
self.company = Company.objects.create(company_name="Test Company", user=self.user)
def test_permanent_form(self):
form_data = {
"company": self.company.pk,
"user": self.user.pk,
"app_job_title": "Test Job",
"app_date": datetime.date.today(),
"followup_date": datetime.date.today(),
"interview_date": datetime.date.today(),
"app_notes": "Some job notes",
"status": "Hired"
}
form = PermanentForm(data=form_data)
self.assertTrue(form.is_valid())
```
#### File: backend/tests/test_models.py
```python
from django.test import TestCase
from django.contrib.auth.models import User
import datetime
from backend.models import Company, ContractApplication, FreelanceApplication, PermanentApplication
# Test Company Model
class CompanyModelTestCase(TestCase):
def setUp(self):
User.objects.create_user(username="Test User")
self.user = User.objects.get(username="Test User")
def test_create_company(self):
self.company = Company.objects.create(
user=self.user,
company_name="Test Company",
company_notes="This is a note",
contact_name="Test Contact",
contact_phone="(555) 123-4567",
contact_email="<EMAIL>"
)
self.assertTrue(Company.objects.get(company_id=1))
# Test Application Models
class AppModelTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create_user(username="Test User")
def setUp(self):
self.company = Company.objects.create(
user=self.user,
company_name="Test Company",
company_notes="This is a note",
contact_name="Test Contact",
contact_phone="(555) 123-4567",
contact_email="<EMAIL>"
)
def test_create_contract_app(self):
ContractApplication.objects.create(
user=self.user,
company=self.company,
app_job_title="Test Contract",
app_date=datetime.date.today(),
followup_date=datetime.date.today(),
interview_date=datetime.date.today(),
app_notes="This is a note",
status="In Progress",
contract_start=datetime.date.today(),
contract_end=datetime.date.today()
)
self.assertTrue(ContractApplication.objects.get(app_id=1))
def test_create_freelance_app(self):
FreelanceApplication.objects.create(
user=self.user,
company=self.company,
app_job_title="Test Freelance",
app_date=datetime.date.today(),
followup_date=datetime.date.today(),
interview_date=datetime.date.today(),
app_notes="This is a note",
status="In Progress",
freelance_details="Make a website",
freelance_bid=100.00
)
self.assertTrue(FreelanceApplication.objects.get(app_id=1))
def test_create_permanent_app(self):
PermanentApplication.objects.create(
user=self.user,
company=self.company,
app_job_title="Test Freelance",
app_date=datetime.date.today(),
followup_date=datetime.date.today(),
interview_date=datetime.date.today(),
app_notes="This is a note",
status="In Progress"
)
self.assertTrue(PermanentApplication.objects.get(app_id=1))
``` |
{
"source": "JoshMcKinstry/Dork_Game_team_octosquad",
"score": 3
} |
#### File: Dork_Game_team_octosquad/dork/item_manager.py
```python
from dork.items import Item
DICT_ITEMS = {}
def assembling_items(names, descriptions, properties):
"""
Constructs item objects for all items in game and stores all the items
inside a dictionary.
Parameters:
names(list): A list that contains all the items available to game.
description(list): A list that has all the item descriptions.
properties(list): A list of lists that contain all the
properties associated with an item.
"""
scope = range(len(names))
for i in scope:
item = Item(names[i], descriptions[i], properties[i])
DICT_ITEMS.update({item.name: item})
def is_item(item_name):
"""
A function that verifies that an object is a valid item.
Parameters:
item_name(str): Holds the name of item.
Returns:
bool: Returns true if item is in dictionary. Returns false otherwise.
"""
return item_name in DICT_ITEMS.keys()
def item_description(item_name):
"""
A function that gets the item description.
Parameters:
item_name(str): Name of item in dictionary
Returns:
        str: Description of the item.
"""
return DICT_ITEMS[item_name].description
def items_yaml_representation():
"""
Creates a yaml friendly representation of the set of items.
Returns:
items_repr(dict): Returns dictionary that contains all item objects.
"""
items = {}
items_repr = {'Items': items}
for item_obj in list(DICT_ITEMS.values()):
items.update(item_obj.yaml_representation())
return items_repr
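# Illustrative usage sketch (added for clarity, not part of the original
# module): assuming Item stores name/description/properties as the tests
# elsewhere in this repo suggest, the helpers above can be exercised like so:
#
#   >>> assembling_items(['Donut'], ['An old fashioned donut'], [['eatable']])
#   >>> is_item('Donut')
#   True
#   >>> item_description('Donut')
#   'An old fashioned donut'
#   >>> items_yaml_representation()['Items']  # dict keyed by item name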
```
#### File: Dork_Game_team_octosquad/tests/test_character_manager.py
```python
import dork.character_manager as character_m
def test_assembling_player():
"""
Test for assembling player
"""
position = 'Entrance'
inventory = 'Donut'
character_m.assembling_player(position, inventory)
assert character_m.DICT_CHARACTERS['Player'].position == 'Entrance'
assert character_m.DICT_CHARACTERS['Player'].inventory == 'Donut'
def test_player_position():
"""
Test player position
"""
position = 'Entrance'
inventory = 'Donut'
character_m.assembling_player(position, inventory)
assert character_m.player_position() == 'Entrance'
def test_player_inventory():
"""
Test player inventory
"""
position = 'Entrance'
inventory = 'Donut'
character_m.assembling_player(position, inventory)
assert character_m.player_inventory() == 'Donut'
def test_update_player_position():
"""
Test updated player position
"""
position = 'Entrance'
inventory = 'Donut'
character_m.assembling_player(position, inventory)
character_m.update_player_position('Trail')
assert character_m.DICT_CHARACTERS['Player'].position == 'Trail'
def test_player_has_item():
"""
Test for the player_has_item method
"""
position = 'Entrance'
inventory = 'Donut'
character_m.assembling_player(position, inventory)
assert character_m.player_has_item('Donut') is True
def test_update_player_inventory():
"""
Test for the update_player_inventory method
"""
position = 'Entrance'
inventory = ['Donut']
character_m.assembling_player(position, inventory)
character_m.update_player_inventory('Flower')
assert character_m.DICT_CHARACTERS['Player'].inventory == [
'Donut', 'Flower']
def test_remove_item_from_inventory():
"""
Test for the update_player_position
"""
position = 'Entrance'
inventory = ['Donut', 'Flower']
character_m.assembling_player(position, inventory)
character_m.remove_item_from_inventory('Flower')
assert character_m.DICT_CHARACTERS['Player'].inventory == ['Donut']
def test_yaml_representation():
"""
Test for the yaml representation method
"""
position = 'Entrance'
inventory = ['Donut', 'Flower']
character_m.assembling_player(position, inventory)
assert isinstance(character_m.player_yaml_representation(), dict)
```
#### File: Dork_Game_team_octosquad/tests/test_dork_cli.py
```python
from types import FunctionType
from unittest.mock import patch
import pytest
import dork.cli as cli
# pylint: disable=protected-access
def test_repl_exists():
"""the dork repl should exist
"""
expect = "Dork.cli should define a repl method"
assert "repl" in vars(cli), expect
assert isinstance(cli.repl, FunctionType)
def test_read_exists():
"""the cli.read should exist
"""
expect = "Dork.cli should define a read method"
assert "read" in vars(cli), expect
assert isinstance(cli.read, FunctionType)
def test_evaluate_exists():
"""the cli.evaluate should exist
"""
expect = "Dork.cli should define an evaluate method"
assert "evaluate" in vars(cli), expect
assert isinstance(cli.evaluate, FunctionType)
@pytest.mark.parametrize("expected, actual", [
("", ""),
("words go here", "words go here"),
("555", "555")
])
def test_read_takes_any_input(expected, actual):
"""the repl read function should accept any input
"""
with patch('builtins.input', return_value=actual, autospec=True):
assert cli.read() == expected
def test_print_load(run):
"""Test _print_load outputs correctly
"""
output, _, _ = run(cli._print_load)
assert "Loading previous" in output, "_print_load should print a message"
def test_menu_evaluate(run):
"""Test the menu evaluate method
"""
output, _, _ = run(cli._menu_evaluate, ['help'])
assert "Main Menu Commands" in output, "\
help command provides help"
output, _, _ = run(cli._menu_evaluate, ['new'])
assert "Starting the game" in output, "new command should start a new game"
output, _, _ = run(cli._menu_evaluate, ['impossible'])
assert "Please input a valid command" in output, "handles bad commands"
def test_game_evaluate(run):
"""Test
"""
output, _, _ = run(cli._game_evaluate, ['notaction'])
assert "Please provide a command" in output, "handles bad commands"
output, _, _ = run(cli._game_evaluate, ['load'])
assert output == ''
assert cli._game_evaluate(['save']) == cli.State(4)
assert cli._game_evaluate(['take', 'flyer']) == cli.State(2)
@pytest.mark.parametrize('inputs', [('y'), ('n'), ('bad')])
def test_safe_quit(run, inputs):
"""Test
"""
output, _, _ = run(cli._safe_quit, input_side_effect=[inputs])
assert "Would you like to save" in output, "game asks for save"
def test_repl(run):
"""Test that game can start and quit
"""
with pytest.raises(SystemExit):
output, _, _ = run(cli.repl, input_side_effect=['quit'])
assert "Welcome to the Game" in output, "game should start from menu"
assert "Leaving Dork" in output, "game should quit from menu"
output, _, _ = run(cli.repl, input_side_effect=['info', 'quit'])
assert "What is Dork?" in output, "game should print info from menu"
output, _, _ = run(cli.repl, input_side_effect=['load', 'efwwef'])
assert "Please input a valid command!" in output, "it broke"
def test_print_info(run):
"""Test that description of game is printed out
"""
output, _, _ = run(cli._print_info)
assert "What is Dork" in output, "game should have a description"
@pytest.mark.parametrize('command', ['', 'move', 'use'])
def test_game_helper(run, command):
"""Test that game prints help messages
"""
output, _, _ = run(cli._game_helper, command)
if command == '':
assert "List of in game commands" in output, "return command list"
elif command == 'move':
assert "MOVE" in output, "help move should print 'move' help message"
elif command == 'use':
assert "USE" in output, "help use should print 'move' help message"
def test_save_evaluate(run):
"""Test that saving prints message
"""
output, _, _ = run(cli._save_evaluate)
assert "Saving Game" in output, "game states is saving"
def test_menu_evaluates_info(run):
"""Test that the menu evaluates the 'info' command
"""
output, _, _ = run(cli._menu_evaluate, ["info"])
assert "What is Dork" in output, "menu should accept 'info' as a command"
@pytest.mark.parametrize('state', [(5), (3), (1)])
def test_cli_state_changes(run, state):
"""Test that quit and load change states
and that menu is printed when returned
"""
with pytest.raises(SystemExit):
if state == 5:
output, _, _ = run(cli.repl, input_side_effect=['quit'])
assert "Leaving Dork" in output
elif state == 3:
output, _, _ = run(cli.repl, input_side_effect=[
'load', 'path', 'quit'])
assert "Loading previous" in output
elif state == 1:
output, _, _ = run(cli.repl, input_side_effect=['help', 'quit'])
assert "Welcome" in output
def test_game_evaluates_quit(run):
"""Test that when game quits, it asks player to save first
"""
output, _, _ = run(cli._game_evaluate, ['quit'], input_side_effect=['bad'])
assert "Would you like to save the game" in output
assert "Invalid Response" in output
def test_game_evaluates_full_command(run):
"""Test that the game evaluator calls the game engine
"""
with patch('dork.game_engine.user_command') as called:
run(cli._game_evaluate, ['move', 'north'])
called.assert_called_with(('move', '', 'North'))
def test_game_gives_help(run):
"""Test that the game provides help to players
"""
with patch('dork.cli._game_helper') as helping:
run(cli._game_evaluate, ['help', 'move'])
helping.assert_called_with('move')
def test_evaluate():
"""
Test evaluate
"""
assert cli.evaluate('quit', cli.State(5)) is None
assert cli.evaluate('save', cli.State(4)) == cli.State(1)
assert cli.evaluate('help', cli.State(2)) == cli.State(2)
# def test_menu_through_repl(run):
# """Test
# """
# with pytest.raises(SystemExit):
# output, _, _ = run(cli.repl, input_side_effect=
# ['load', 'path', 'quit', 'quit'])
# assert "Loading" in output,
# "menu should load previous save and then quit the game"
# https://stackoverflow.com/questions/15672151/is-it-possible-for-a-unit-test-to-assert-that-a-method-calls-sys-exit
# def test_quit_dork(run):
# with assertRaises(SystemExit):
# output, _, _ = run(cli._quit_dork)
# assert output == "Leaving Dork...\n\n"
```
#### File: Dork_Game_team_octosquad/tests/test_items.py
```python
from dork.items import Item
def test_init_method():
"""
Testing the constructor
"""
name = 'Donut'
    description = {'This is an old fashioned donut'}
properties = {'eatable'}
item = Item(name, description, properties)
assert item.name == name
assert item.description == description
assert item.properties == properties
def test_has_property():
"""
Testing the has_property method
"""
name = 'Donut'
    description = {'This is an old fashioned donut'}
properties = ['eatable', 'pickable']
item = Item(name, description, properties)
assert item.has_property('eatable') is True
def test_yaml_representation():
"""
Testing the yaml_representation method
"""
name = 'Donut'
    description = {'This is an old fashioned donut'}
properties = ['eatable', 'pickable']
item = Item(name, description, properties)
assert isinstance(item.yaml_representation(), dict)
```
#### File: Dork_Game_team_octosquad/tests/test_yamlloader.py
```python
import unittest
import dork.yamlloader as loader
class TestYamlLoader(unittest.TestCase):
"""
Testing YamlLoader
"""
def test_yaml_loader(self):
"""
Testing the writing_yml method
"""
data = ['This a .yml file created by the yaml_loader testing module']
file_path = './tests/testing_files/testing_yaml_loader_file.yml'
self.assertEqual(loader.writing_yml(data, file_path), None)
``` |
{
"source": "JoshMend/prebotc-graph-model",
"score": 2
} |
#### File: prebotc-graph-model/model/runmodel_euler_test.py
```python
import sys
#import prebotc_pure as prebotc
#import prebotc_cython as prebotc
import prebotc_weave as prebotc
import numpy as np
import graph_tool as gt
import scipy.io
import scipy.integrate
import pickle
paramFn = 'param_files/test.pkl'
outFn = 'output/test.mat'
graphFn = '../graphs/test.gml'
dt = 1e-4
t0 = 0.0
tf = 5
Nstep = int(round(tf/dt))
report_every = 1000
num_eqns_per_vertex = 7 #V, Na m, Na h, K n, hp Nap, Ca Can, Na pump
num_eqns_per_edge = 1
abs_error = 1e-9
rel_error = 1e-8
def main(argv=None):
# parse arguments (not used yet)
if argv is None:
argv = sys.argv
# load parameters
f = open(paramFn, 'r')
my_params = pickle.load(f)
f.close()
# load graph topology
g = gt.load_graph(graphFn)
g.reindex_edges()
num_vertices = g.num_vertices()
num_edges = g.num_edges()
# store vertex types
vertex_types = np.array( g.vertex_properties["type"].get_array(),
dtype=np.int )
# construct an edge list
edge_list = np.zeros( (num_edges, 3) )
# also a lookup table for in-edges
# this requires a degree list
in_degrees = np.array( g.degree_property_map("in").get_array(),
dtype=np.int )
max_degree = np.max( in_degrees )
if num_edges > 0:
# "ragged" array of in-edges
in_edges = np.zeros( (num_vertices, max_degree), dtype=np.int )
gsyn_props = g.edge_properties["gsyn"]
else:
in_edges = np.zeros( (num_vertices, max_degree), dtype=np.int )
gsyn_props = []
# for looping
in_edge_ct = np.zeros( (num_vertices,), dtype=np.int )
i = 0
for e in g.edges():
source_index = int( e.source() )
target_index = int( e.target() )
edge_list[i,...] = [source_index,
target_index,
gsyn_props[e]]
in_edges[ target_index, in_edge_ct[target_index] ] = i
# increment indices
in_edge_ct[ target_index ] += 1
i += 1
## setup initial conditions
# state will contain vertex variables & edge
# variables in a 1d array
N = num_vertices*num_eqns_per_vertex +\
num_edges*num_eqns_per_edge
# state vector y encodes vertex and edge data
y = np.zeros(N)
for i in range( num_vertices ):
# vertex data in 0:num_eqns_per_vertex*num_vertices-1
j = range(i*num_eqns_per_vertex, (i+1)*num_eqns_per_vertex)
#print(j)
y[j] = [
-0.026185387764343,
0.318012107836673,
0.760361103277830,
0.681987892188221,
0.025686471226045,
0.050058183820371,
4.998888741335261
]
offset = num_vertices*num_eqns_per_vertex
for i in range( num_edges ):
j = range(offset + i*num_eqns_per_edge,
offset + (i+1)*num_eqns_per_edge)
#print(j)
y[j] = 0.000001090946631
#print(N)
print y
# f is the rhs with parameters evaluated
def f(t, y):
dydt = prebotc.rhs(t, y,
vertex_types,
edge_list,
in_edge_ct,
in_edges,
my_params)
return dydt
# output vector of states
save_state = np.zeros( (N, Nstep) )
## hard-coded Euler method
t = t0;
for i in range(Nstep):
dydt = f(t, y)
y = y + dydt * dt # fwd Euler
#save_state[:, i] = y[ 0:(num_vertices*num_eqns_per_vertex):num_eqns_per_vertex ] # just voltages
save_state[:, i] = y; # all vars
t = t + dt;
if ( (i+1)%report_every ) == 0:
print t
scipy.io.savemat(outFn, mdict={'Y': save_state},
oned_as = 'col')
# run the main stuff
if __name__ == '__main__':
status = main()
sys.exit(status)
```
#### File: prebotc-graph-model/postprocessing/classify_phase.py
```python
import numpy as np
def fit_MRF_pseudolikelihood(adj_exc,adj_inh,y):
'''
Fit a Markov random field using maximum pseudolikelihood estimation,
also known as logistic regression. The conditional probabilities
follow
    y_i ~ Logistic(B[0] + B[1] A1_{ij} y_j + B[2] A1_{ij} (1-y_j)
                   + B[3] A2_{ij} y_j + B[4] A2_{ij} (1-y_j)),
where A1 = adj_exc and A2 = adj_inh and each term is summed over
j.
Params
======
adj_exc: excitatory adjacency matrix
adj_inh: inhibitory adjacency matrix
y: site variables, 0 or 1
Returns
=======
B: logistic regression coefficients
'''
from sklearn.linear_model import LogisticRegression
from sklearn import cross_validation
if len(np.unique(y)) < 2:
B=np.array([np.nan,np.nan,np.nan,np.nan,np.nan])
else:
N=y.shape[0]
ytile=np.tile(y,(N,1)).T
X1=np.array(np.sum(np.multiply(adj_exc,ytile),0)).flatten()
X2=np.array(np.sum(np.multiply(adj_exc,1-ytile),0)).flatten()
X3=np.array(np.sum(np.multiply(adj_inh,ytile),0)).flatten()
X4=np.array(np.sum(np.multiply(adj_inh,1-ytile),0)).flatten()
model=LogisticRegression(penalty='l2')
X=np.column_stack((X1,X2,X3,X4))
model.fit(X,y)
B=np.hstack((model.intercept_, model.coef_.flatten()))
return B
def predict_MRF(B, adj_exc, adj_inh, burn_in=4e3, steps=1e4,
skip_multiple=3):
'''
Perform prediction with an MRF (Markov random field). Uses Gibbs sampling
to sample from the distribution P(y) =1/Z exp( -H(y) ).
The Hamiltonian is:
    H = \sum_{ij} y_i (B[0] + B[1] A1_{ji} y_j + B[2] A1_{ji} (1-y_j)
                       + B[3] A2_{ji} y_j + B[4] A2_{ji} (1-y_j))
Params
======
B: coefficients of the MRF
adj_exc: excitatory adjacency matrix
adj_inh: inhibitory adjacency matrix
burn_in: number of burn-in steps to take (default: 4000)
steps: total number of Gibbs steps to take (default: 10000)
skip_multiple: skips skip_multiple * num_neuron steps between samples
Returns
=======
ypostmean: posterior mean of state
'''
import numpy.random
def gibbs_proba(y,B,adj_exc,adj_inh):
        # coefficient ordering matches fit_MRF_pseudolikelihood: B[0] is the
        # intercept, B[1:] multiply the four adjacency terms
        term0=B[1]*np.dot(adj_exc.T,y)
        term1=B[2]*np.dot(adj_exc.T,(1-y))
        term2=B[3]*np.dot(adj_inh.T,y)
        term3=B[4]*np.dot(adj_inh.T,(1-y))
        e=B[0]+term0+term1+term2+term3
return np.exp(e)/(np.exp(e)+1.0)
N=adj_exc.shape[0]
steps=int(steps)
# run a Gibbs sampler
y=np.random.rand(N,1)
samples=np.zeros((N,steps))
# zero diagonals (should be 0 already)
np.fill_diagonal(adj_exc,0)
np.fill_diagonal(adj_inh,0)
for ii in range(steps):
yt=y
# Gibbs update
proba=gibbs_proba(y,B,adj_exc,adj_inh)
yt=np.array(np.random.rand(N,1) < proba,dtype=np.float)
y=yt
samples[:,ii]=y.flatten()
# compute mle
use_indices=np.arange(burn_in,steps,skip_multiple*N, dtype=int)
final_samples=samples[:,use_indices]
ypostmean=np.mean(final_samples,axis=1)
return ypostmean
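# Hypothetical end-to-end sketch (added for illustration, not original code):
# fit the pseudolikelihood model on observed 0/1 site variables y, then run
# the Gibbs sampler on the same graph to get posterior means. Shapes and the
# random inputs below are assumptions made only for the example.
#
#   >>> import numpy as np
#   >>> n = 50
#   >>> adj_exc = (np.random.rand(n, n) < 0.10).astype(float)
#   >>> adj_inh = (np.random.rand(n, n) < 0.05).astype(float)
#   >>> y = (np.random.rand(n) < 0.5).astype(float)
#   >>> B = fit_MRF_pseudolikelihood(adj_exc, adj_inh, y)
#   >>> ypost = predict_MRF(B, adj_exc, adj_inh, burn_in=1000, steps=5000)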
def fit_logistic_graph_features():
pass
def get_node_features(adj_exc,adj_inh,normalize_centrality=True):
'''
Get node-based features to train logistic classifier
Params
======
adj_exc: excitatory adjacency matrix
adj_inh: inhibitory adjacency matrix
normalize_centrality: normalize relevant measures? (default: True)
Returns
=======
X: numneuron x numfeatures array to be used with logistic regression
X_labels
'''
import networkx as nx
G_exc=nx.DiGraph(adj_exc)
G_inh=nx.DiGraph(adj_inh)
def dict_to_array(d):
return np.array([d[i] for i in sorted(d)])
def features(G,normalize_centrality):
'''
Returns the features we are interested in within a dict
'''
load_centrality=nx.load_centrality(G,normalized=normalize_centrality)
betweenness_centrality=nx.betweenness_centrality(G,normalized=normalize_centrality)
eigenvector_centrality=nx.eigenvector_centrality_numpy(G,normalized=normalize_centrality)
closeness_centrality=nx.closeness_centrality(G,normalized=normalize_centrality)
in_degree=G.in_degree()
out_degree=G.out_degree()
core_number=nx.core_number(G)
clustering=nx.clustering(G)
d={}
d['in_degree']=in_degree
d['out_degree']=out_degree
d['load_centrality']=load_centrality
        d['betweenness_centrality']=betweenness_centrality
d['eigenvector_centrality']=eigenvector_centrality
d['closeness_centrality']=closeness_centrality
d['core_number']=core_number
return d
# grab the features
    d_exc=features(G_exc,normalize_centrality)
    d_inh=features(G_inh,normalize_centrality)
# setup some structures
num_features=len(d_exc)+len(d_inh)
    num_nodes=G_exc.number_of_nodes()
X=np.zeros((num_nodes,num_features),dtype=np.float)
X_labels=[]
# fill in X and Xlabels
feature_index=0
for gclass in ('exc','inh'):
if gclass == 'exc':
d=d_exc
else:
d=d_inh
for feature in sorted(d):
X_labels.append(feature+"_"+gclass)
X[:,feature_index]=dict_to_array(d[feature])
feature_index+=1
return X, X_labels
```
#### File: prebotc-graph-model/postprocessing_preBotBot/doPost.py
```python
import sys
import numpy as np
import scipy.signal
import scipy.io
import argparse
import networkx as nx
import matplotlib.pyplot as plt
import cmath
import math
maxorder=20
eta_norm_pts = 10
def parse_args(argv):
# defaults
transient = 10000 # ms
spike_thresh = -20 # mV
f_sigma = 20 # ms
butter_high = 4 # Hz
butter_low = -np.inf # Hz
bin_width = 20 # ms
cutoff = 0.5
peak_order = 30
peak_percentile = 75
eta_norm_pts=8
op_abs_thresh=0.2
# parsing
parser = argparse.ArgumentParser(prog="doPost",
description=('Postprocessing of'
' model output'))
parser.add_argument('sim', help='model output (.mat) file')
parser.add_argument('output', help='output (.jpg) filename')
parser.add_argument('--transient', '-t',
help='transient time, ms (default: %(default)s)',
type=float, default=transient)
parser.add_argument('--sec', '-s', action='store_true',
help='time units are in seconds (default: ms)')
parser.add_argument('--volt', '-V', action='store_true',
help=('file contains voltage traces '
'(default: sparse spike trains)'))
parser.add_argument('--thresh',
help='spike threshold, mV (default: %(default)s)',
type=float, default=spike_thresh)
parser.add_argument('--fsig', '-f',
help=('filter standard deviation, ms '
'(default: %(default)s)'),
type=float, default=f_sigma)
parser.add_argument('--butter_high',
help=('Butterworth filter upper cutoff frequency, Hz '
'(default: %(default)s)'),
type=float, default=butter_high)
parser.add_argument('--butter_low',
help=('Butterworth filter lower cutoff frequency, Hz '
'(default: %(default)s)'),
type=float, default=butter_low)
parser.add_argument('--bin_width', '-b',
help='bin width, ms (default: %(default)s)',
type=float, default=bin_width)
parser.add_argument('--cut', '-c',
help='burst cutoff parameter (default: %(default)s)',
type=float, default=cutoff)
args = parser.parse_args(argv[1:])
return args.sim, args.output, args.transient, args.sec, args.thresh, \
args.fsig, args.butter_low, args.butter_high, args.bin_width,\
args.cut, args.volt,peak_order,peak_percentile,eta_norm_pts,op_abs_thresh,
'''
This method chops of the transient stage of the data for better processing
parameters: data-Data being passed in to chop
transient- time and which you want to chop till
dt-the change in time of the model
return: The modified data excluding transient stage
'''
def chop_transient(data, transient, dt):
firstIdx = int(np.ceil(transient / dt) - 1)
return data[:,firstIdx:]
'''
Find spikes in voltage data by taking relative maxima
parameters: data- self-explanatory
threshhold- The voltage at which you start to count data as a spike
return: new_indices-location of the maxima
spike_mat-dense matrix containing 1 or 0 based on if a spike is present
'''
def find_spikes(data, threshold):
indices = scipy.signal.argrelmax(data, axis=1) # 1st and 2nd coords of maxima
mask = np.where(data[indices] > threshold)
new_indices = (indices[0][mask],
indices[1][mask])
spike_mat = np.zeros(np.shape(data), dtype=np.int) # dense format
spike_mat[new_indices] = 1
return new_indices, spike_mat
'''
Return time indices of spiking of a given neuron
'''
def spikes_of_neuron(spikes, neuron):
return spikes[1][np.where(spikes[0] == neuron)]
'''
Filter the spike timeseries. Returns both neuron-by-neuron timeseries
filtered with a gaussian kernel and the population data filtered
with a butterworth filter.
Parameters
==========
spike_mat: the numneuron x time matrix of spikes
samp_freq: sample frequency
f_sigma: variance of gaussian
butter_freq: butterworth filter cutoff frequency(s)
Returns
=======
spike_fil: gaussian filtered matrix, same shape as spike_mat
int_signal: butterworth filtered population timeseries
spike_fil_butter: butterworth filtered matrix, same shape as spike_mat
'''
def spikes_filt(spike_mat, samp_freq, f_sigma, butter_freq):
'''
Filter the spike timeseries. Returns both neuron-by-neuron timeseries
filtered with a gaussian kernel and the population data filtered
with a butterworth filter.
Parameters
==========
spike_mat: the numneuron x time matrix of spikes
samp_freq: period (in ms) between measurements in spike_mat
f_sigma: variance of gaussian
butter_freq: butterworth filter cutoff frequency(s)
Returns
=======
spike_fil: gaussian filtered matrix, same shape as spike_mat
int_signal: butterworth filtered population timeseries
spike_fil_butter: butterworth filtered matrix, same shape as spike_mat
'''
def filt_window_gauss(samp_freq, std = 20, width = None, normalize = 1):
if width is None:
width = std*4+1
width /= samp_freq
std /= samp_freq
w = scipy.signal.gaussian(width, std)
if not normalize == 0:
w = normalize * w / sum(w)
return w
def filt_gauss(spike_mat, samp_freq, f_sigma=20):
w = filt_window_gauss(samp_freq, std=f_sigma, normalize=1)
spike_fil = scipy.signal.fftconvolve(spike_mat, w[ np.newaxis, : ],
mode='same')
#spike_fil = scipy.signal.convolve(spike_mat, w[ np.newaxis, : ],
# mode='same')
return spike_fil
def filt_butter(data, samp_freq, butter_freq, axis=-1):
'''
Filter data with a 2nd order butterworth filter.
Parameters
==========
data: ndarray
samp_freq: sampling period (s)
butter_freq: [cutoff_low, cutoff_high] (Hz), can be infinite
axis (optional): axis along which to filter, default = -1
Returns
=======
filtNs: filtered version of data
'''
order = 2
ny = 0.5 / samp_freq # Nyquist frequency
cof = butter_freq / ny # normalized cutoff freq
if np.isneginf(cof[0]) and np.isfinite(cof[1]):
# lowpass
cof1 = cof[1]
b, a = scipy.signal.butter(order, cof1, btype='low')
filtNs = scipy.signal.filtfilt(b, a, data, axis=axis)
elif np.isfinite(cof[0]) and np.isinf(cof[1]):
# highpass
cof1 = cof[0]
b, a = scipy.signal.butter(order, cof1, btype='high')
filtNs = scipy.signal.filtfilt(b, a, data, axis=axis)
elif np.isfinite(cof[0]) and np.isfinite(cof[1]):
# bandpass
b, a = scipy.signal.butter(order, cof, btype='band')
filtNs = scipy.signal.filtfilt(b, a, data, axis=axis)
else:
raise Exception('filt_butter called with bad cutoff frequency')
filtNs /= samp_freq # normalize to rate
return filtNs
spike_fil = filt_gauss(spike_mat, samp_freq, f_sigma=f_sigma)
int_signal = filt_butter(np.mean(spike_mat, axis=0),
samp_freq*1e-3, butter_freq)
spike_fil_butter = filt_butter(spike_fil, samp_freq*1e-3,
butter_freq, axis=1)
return spike_fil, int_signal, spike_fil_butter
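# Illustrative sketch (not in the original file): smooth a binned spike matrix
# with a 20 ms Gaussian kernel and low-pass the population rate below 4 Hz.
# The 2 ms sample period and the array shape are assumptions for this example.
#
#   >>> import numpy as np
#   >>> spike_mat_bin = np.random.poisson(0.1, size=(100, 5000))
#   >>> fil, pop_rate, fil_butter = spikes_filt(spike_mat_bin, 2, 20,
#   ...                                         np.array([-np.inf, 4]))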
'''
Bin spikes
Parameters
==========
spike_mat: matrix of spikes, (num_neuron x num_time)
bin_width: bin width in time units
dt: sampling frequency in spike mat
Returns
=======
bins: an array of the bin locations in time units
binned_spikes: a new matrix (num_neuron x num_bins)
'''
def bin_spikes(spike_mat, bin_width, dt):
num_neurons= np.shape(spike_mat)[0]
num_times = np.shape(spike_mat)[1]
stride = int(np.ceil(bin_width / dt))
bins = np.arange(0, num_times, stride, dtype=np.float)
which_bins = np.digitize(range(0, num_times), bins)
num_bins = len(bins)
binned_spikes = np.zeros((num_neurons, num_bins), dtype=np.int)
for i in range(num_bins):
bin_mask = np.where(which_bins == i)[0] # mask data in bin i, tuple
bin_data = spike_mat[:,bin_mask]
binned_spikes[:,i] = np.sum(bin_data, axis=1).flatten()
return bins, binned_spikes
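# Illustrative sketch (assumptions only, not original code): bin a dense spike
# matrix sampled every 0.1 ms into 20 ms bins; the number of columns of the
# binned matrix matches the number of bin edges returned.
#
#   >>> import numpy as np
#   >>> spike_mat = np.zeros((10, 100000), dtype=int)
#   >>> bins, binned = bin_spikes(spike_mat, bin_width=20, dt=0.1)
#   >>> binned.shape[1] == len(bins)
#   True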
'''
This is computes the cross correlation for two signals
paramters:
signal_1: first signal you want to use
signal_2: second signal you want to use
taolen: this number determines how much of the tao to use
returns:
values of the cross correlation
'''
def xcorr(signal_1,signal_2):
signal_1 = np.asarray(signal_1)
signal_2 = np.asarray(signal_2)
#Centering the data, giving it a zero mean to reduce variance and not worry about peak differences
m1 = np.mean(signal_1)
m2 = np.mean(signal_2)
signal_1_centered = (signal_1 - m1) / (np.std(signal_1) * len(signal_1))
signal_2_centered = (signal_2 - m2) / np.std(signal_2)
xcorr = scipy.signal.correlate(signal_1_centered,signal_2_centered)
return xcorr
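# Illustrative sketch (not original code): because signal_1 is centered and
# scaled by len(signal_1)*std and signal_2 by its std, the peak of the result
# behaves like a normalized correlation coefficient (1.0 for identical
# signals at zero lag).
#
#   >>> import numpy as np
#   >>> s = np.sin(np.linspace(0, 10 * np.pi, 1000))
#   >>> np.isclose(np.max(xcorr(s, s)), 1.0)
#   True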
'''
Gets info from the graph to be used for plotting
'''
def get_graphinfo(graph_fn):
graph = nx.read_gml(graph_fn)
cells_inhib = np.array(nx.get_node_attributes(graph, 'inh').values(),
dtype=np.int)
graph_edges = nx.edges(graph)
number_of_nodes = nx.number_of_nodes(graph)
degree_histogram = nx.degree_histogram(graph)
return cells_inhib, graph_edges,number_of_nodes,degree_histogram
'''
This method gets the time at which the peak occurs for a signal
goes through the given peak_times and finds at which point the signal is the
strongest
peak_times: the times at which a peak in the signal occurs
signal: The signal that you want to find the max of
'''
def find_max_time(peak_times,signal):
max_time = np.nan
for t in peak_times:
if np.isnan(max_time):
max_time = t
elif signal[t] > signal[max_time]:
max_time = t
return max_time
'''
This method finds the phase lag and population correlation for the data given. Use the max peak from the autocorrelation and then the cross correlation peak in the middle of those two
input:
xcorr-The cross correlation signal
autocorr-An autocorrelations signal to be ran against
output:
phase-The phase lag/difference between two populations
pop_corr-The correlation between both signals
'''
def find_metrics(xcorr,autocorr):
max_time_cross = np.nan;
peak_auto = scipy.signal.argrelmax(autocorr)[0].tolist()
peak_cross = scipy.signal.argrelmax(xcorr)[0].tolist()
max_time = find_max_time(peak_auto,autocorr)
for i in range(peak_auto.index(max_time)+1,len(peak_auto)):
if autocorr[peak_auto[i]] > 0:
max_time_next = peak_auto[i]
break
for x in peak_cross:
if x > max_time and x < max_time_next and xcorr[x] > 0:
max_time_cross = x
break
auto_period = max_time_next - max_time
    auto_cross_period = max_time_cross - max_time
    phase = float(auto_cross_period)/float(auto_period)
return phase, xcorr[max_time_cross]
'''
This method finds the population burst peaks for a given signal, uses a percentile filter to elimnate finding noisy peaks
input:
signal-This is the signal you want to find the peaks of
peak_order-The number of points of comparison for each peak on each side of the current value
peak_percentile-The percentage threshold the peak must meet
dt-The time step
output:
pop_burst_peak-Peaks of the signal that pass the given criteria for a peak
'''
def burst_stats(signal,peak_order,peak_percentile,dt):
pop_burst_peak=scipy.signal.argrelmax(signal, order=peak_order)[0]
pop_burst_peak=pop_burst_peak[signal[pop_burst_peak] >
np.percentile(signal,peak_percentile)]
return pop_burst_peak
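# Illustrative sketch (not original code): keep only local maxima that win a
# +/- peak_order sample comparison *and* exceed the 75th percentile of the
# signal, which suppresses small noisy maxima. Inputs below are made up for
# the example.
#
#   >>> import numpy as np
#   >>> t = np.linspace(0, 10, 2000)
#   >>> sig = np.sin(2 * np.pi * t) + 0.05 * np.random.randn(t.size)
#   >>> peaks = burst_stats(sig, 30, 75, t[1] - t[0])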
'''
This method is used to get the phi (phase differences) between signals,
here we use a moving window to find the refrence perioid to calculate phi
input:
pop_burst_peak1(2)-This is the time for the peaks from signal 1 or signal 2
bins-This is the bin info for the signals after some post processing
output:
phis-List of phis for the signals
'''
def get_phis(pop_burst_peak1,pop_burst_peak2,bins):
phis = []
windowStartIndex = 0
windowEndIndex = 1
while windowEndIndex < len(pop_burst_peak1):
windowStart = pop_burst_peak1[windowStartIndex]
windowEnd = pop_burst_peak1[windowEndIndex]
peaksInWindow = [i for i in pop_burst_peak2 if i >= windowStart and i <= windowEnd]
for peak in peaksInWindow:
phi = (bins[peak] - bins[windowStart]) / (bins[windowEnd] - bins[windowStart])
phis.append(phi)
windowStartIndex = windowEndIndex
windowEndIndex = windowEndIndex + 1
return phis
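# Worked sketch of the phase calculation (added for illustration): with
# population-1 peaks at bins 10 and 20 and a population-2 peak at bin 15, the
# second population fires halfway through the reference cycle, so phi = 0.5.
#
#   >>> import numpy as np
#   >>> bins = np.arange(0, 600, 20, dtype=float)
#   >>> get_phis(np.array([10, 20]), np.array([15]), bins)
#   [0.5]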
'''
Map phi values to a circle to accuratley take mean and std of the values
input:
phis- Phi values that are in [0,1]
output:
phis- Phi values that are now mapped to [0,2pi] represents radians
'''
def map_phi_to_complex(phis):
complex = []
for i in range(len(phis)):
radians = 2*np.pi*phis[i]
complex.append(cmath.rect(1,radians))
return complex
'''
This will get the mean phi and variance using circular statistics
input:
complex_values- This is a list of complex values that are gotten from the phi values
output:
mean_angle- This is the mean angle of the phi values, represents what the average phase is (can be converted back)
        variance_circular- The magnitude of the mean resultant vector; values near 1 mean the phi values are tightly clustered (the circular variance is 1 minus this value).
'''
def get_circular_statistics(complex_values):
mean_resultant = np.mean(complex_values)
mean_angle = cmath.phase(mean_resultant)
variance_circular = abs(mean_resultant)
return mean_angle,variance_circular
'''
This converts the mean angle back to the standard phi values which lies in [0,1]
input:
mean_angle- This is the mean angle that was calculated from the list of phis
output:
This is the converted average phi values that now consisted with other metrics
'''
def get_normalized_phi(mean_angle):
if mean_angle < 0:
return (2*math.pi + mean_angle) / (2*math.pi)
else:
return mean_angle / (2*math.pi)
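# Worked sketch (illustrative only): phi values of 0.1 and 0.9 sit on either
# side of 0 on the circle, so their circular mean maps back to phi = 0.0
# rather than the misleading linear average of 0.5.
#
#   >>> z = map_phi_to_complex([0.1, 0.9])
#   >>> mean_angle, resultant = get_circular_statistics(z)
#   >>> round(get_normalized_phi(mean_angle), 6)
#   0.0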
def synchrony_stats(data, dt, maxlags=3000):
'''
Synchrony measures
Parameters
==========
data: numneuron x time
dt: time spacing
maxlags: maximal lag for autocorrelation, default=3000 ms
Returns
=======
chi: synchrony measure
autocorr: autocorrelation of population avg \bar{data}(t)
'''
data_pop=np.mean(data, axis=0) # pop avg
sigma_pop=np.mean(np.square(data_pop)) - np.square(np.mean(data_pop))
sigma=np.mean(np.square(data), axis=1) - np.square(np.mean(data, axis=1))
sigma_mean=np.mean(sigma)
chisq=sigma_pop / sigma_mean
chi=np.sqrt(chisq)
mean_subtract=data_pop - np.mean(data_pop)
autocorr=scipy.signal.correlate(mean_subtract, mean_subtract,
mode='valid')
return chi, autocorr
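# Illustrative sketch (not original code): when every trace is identical, the
# variance of the population average equals the mean single-trace variance,
# so chi is exactly 1; asynchronous traces drive chi toward 0.
#
#   >>> import numpy as np
#   >>> trace = np.sin(np.linspace(0, 20, 2000))
#   >>> data = np.tile(trace, (5, 1))
#   >>> chi, _ = synchrony_stats(data, dt=0.01)
#   >>> np.isclose(chi, 1.0)
#   True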
def order_param(eta_norm, eta_t_norm, op_abs_thresh):
'''
Compute the order parameter for the normalized (phase) ETAs.
Parameters
==========
eta_norm: normalized ETA array
eta_t_norm: [-.5, .5] phases corresponding to second axis of array
op_abs_thresh: float
Returns
=======
ops: array of complex valued order parameters, np.nan if undefined
op_abs: magnitudes
op_angle: angles
op_mask: mask of ops with magnitude above threshold
op_angle_mean: mean angle of significant ops
op_angle_std: standard deviation of significant ops
'''
assert op_abs_thresh < 0.5 and op_abs_thresh >= 0.0,\
'op_abs_thresh out of range'
num_neurons=eta_norm.shape[0]
num_bins=eta_norm.shape[1]
dtheta=np.min(np.diff(eta_t_norm))
# below will generate NaNs if the normalization is 0
density_eta=eta_norm/np.tile(np.sum(eta_norm, axis=1),(num_bins,1)).T
ops=np.sum(density_eta*
np.exp(1.0j*
np.tile(eta_t_norm,(num_neurons,1))*
(2*np.pi)),
axis=1)
op_angle=np.angle(ops)/(2*np.pi)
op_abs=np.abs(ops)
op_mask=op_abs > op_abs_thresh
op_angle_mean=np.nanmean(op_angle[op_mask])
op_angle_std=np.nanstd(op_angle[op_mask])
return (ops,op_abs,op_angle,op_mask,op_angle_mean,op_angle_std)
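# Illustrative sketch (not original code): a cell whose normalized ETA is
# sharply peaked near phase 0 yields an order parameter with large magnitude
# and angle near 0, while a flat ETA gives magnitude near 0 and is masked out.
#
#   >>> import numpy as np
#   >>> eta_t_norm = np.linspace(-0.5, 0.5, 20)
#   >>> peaked = np.exp(-(eta_t_norm ** 2) / 0.01)[np.newaxis, :]
#   >>> flat = np.ones((1, 20))
#   >>> ops, op_abs, op_angle, mask, mean_ang, std_ang = order_param(
#   ...     np.vstack((peaked, flat)), eta_t_norm, op_abs_thresh=0.2)
#   >>> op_abs[0] > 0.8 and op_abs[1] < 0.1
#   True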
def event_trig_avg(events, data, normalize=False, pts=10):
'''
Compute an event-triggered average.
Parameters
==========
events, ndarray
Array of event indices.
data, ndarray, ndim=2
Array to be averaged along dim 1 relative to the events.
normalize, bool, optional
Whether to normalize to phase variable
'''
breakpts=np.array(
np.hstack((0, (events[0:-1] + events[1:]) / 2., data.shape[1]-1)),
dtype=np.int)
if normalize:
from scipy.interpolate import griddata
max_interval=2*pts
fullrange=np.linspace(-.5, .5, num=max_interval)
xgrid1=fullrange[0:pts]
xgrid2=fullrange[pts:]
else:
max_interval=2*np.max(np.hstack((events-breakpts[0:-1],
breakpts[1:]-events)))
midpt=int(np.floor(max_interval / 2))
numevents=events.shape[0]-2 # don't use 1st and last due to boundary
eta=np.zeros((data.shape[0], max_interval))
for j in range(numevents):
i=j+1
timeidx=np.arange(int(breakpts[i]), int(breakpts[i+1]), dtype=np.int)
thisevent=events[i]
center=int(np.where(timeidx==thisevent)[0].astype(int))
if normalize:
xs1=np.array(timeidx[:center] - timeidx[center], dtype=np.float)
xs1 /= xs1[0]*(-2.0)
xs2=np.array(timeidx[center+1:] - timeidx[center], dtype=np.float)
xs2 /= xs2[-1]*2.0
xs=np.hstack((xs1, xs2))
toadd=np.apply_along_axis(lambda x:
scipy.interpolate.griddata(
xs, x, fullrange),
1, data[:,timeidx])
eta += toadd
else:
lpad=midpt - center
rpad=max_interval - (len(timeidx)+lpad)
eta += np.pad(data[:, timeidx], ((0,0), (lpad,rpad)),
'constant', constant_values=(0,0))
eta /= float(numevents)
eta[eta < 0] = 0
return eta
'''
This method is adapted from the old main methods of the code, this method will do all the post processing
and allow for it to be ran indpendently of main to allow for passing in of dictionaries withouth saving and loading them to the hard disc to avoid excess memory usage
Output:
mdict - The dictionary of final variables and results. Can either be saved or used as is.
'''
def run(sim_output,trans,sec_flag,spike_thresh,f_sigma,butter_low,butter_high,bin_width,cutoff,are_volts,peak_order,peak_percentile,eta_norm_pts,op_abs_thresh):
butter_freq = np.array([butter_low,butter_high])
if sec_flag:
scalet=1e3
else:
scalet = 1
graph_fn = ''
if isinstance(sim_output['graphFn'],np.ndarray):
graph_fn = str(sim_output['graphFn'][0])
else:
graph_fn = sim_output['graphFn']
#Retrieve parameters from dictionary and data
dt = float(sim_output['dt'])*scalet
data = chop_transient(sim_output['Y'],trans,dt)
num_neurons = np.shape(data)[0]
tmax = np.shape(data)[1]
#Generate spike trains from the data and bin the spikes
if are_volts:
spikes, spike_mat = find_spikes(data, spike_thresh)
else:
data = scipy.sparse.csc.csc_matrix(data)
spike_mat= data.todense()
spikes = data.nonzero()
bins, spike_mat_bin = bin_spikes(spike_mat, bin_width, dt)
#Get the different versions of the filtered data
    spike_fil_bin, butter_int_bin, spike_fil_butter = spikes_filt(spike_mat_bin[:num_neurons//2],
                                                                  dt*bin_width,
                                                                  f_sigma,
                                                                  butter_freq)
    spike_fil_bin2, butter_int_bin2, spike_fil_butter2 = spikes_filt(spike_mat_bin[num_neurons//2:],
                                                                     dt*bin_width,
                                                                     f_sigma,
                                                                     butter_freq)
#Calculate Correlation Values
cross_correlation = xcorr(butter_int_bin2,butter_int_bin)
auto_cross_correlation1 = xcorr(butter_int_bin,butter_int_bin)
auto_cross_correlation2 = xcorr(butter_int_bin2,butter_int_bin2)
#phase_lag,pop_corr = find_metrics(cross_correlation,auto_cross_correlation1)
#graph attributes
cells_inhib,graph_edges,number_of_nodes,degree_histogram = get_graphinfo(graph_fn)
#Calculating Values for Circle Map
pop_burst_peak1 = burst_stats(butter_int_bin,peak_order,peak_percentile,dt*bin_width/1000.)
pop_burst_peak2 = burst_stats(butter_int_bin2,peak_order,peak_percentile,dt*bin_width/1000.)
phis = get_phis(pop_burst_peak1,pop_burst_peak2,bins)
complex_phis = map_phi_to_complex(phis)
mean_angle,variance_angle = get_circular_statistics(complex_phis)
mean_phi = get_normalized_phi(mean_angle)
#std_phi = np.std(phis)
#Get Synchrony Values for each signal
chi1,chi1_auto = synchrony_stats(spike_fil_bin,dt*bin_width/1000.)
chi2,chi2_auto = synchrony_stats(spike_fil_bin2,dt*bin_width/1000.)
'''##Compute event triggered averages and get individual cell statistics
##Population 1
##Normalize time to phase variable [-.5,.5]
eta1_norm = event_trig_avg(pop_burst_peak1,spike_fil_bin,normalize=True,pts=eta_norm_pts)
eta1_t_norm = np.linspace(-0.5, 0.5, 2*eta_norm_pts)
##Order Parameters
(ops1,op_abs1,op_angle1,op_mask1,
op_angle_mean1,op_angle_std1)=order_param(eta1_norm,eta1_t_norm,op_abs_thresh)
##Population 2
##Normalize time to phase variable [-.5,.5]
eta2_norm = event_trig_avg(pop_burst_peak2,spike_fil_bin2,normalize=True,pts=eta_norm_pts)
eta2_t_norm = np.linspace(-0.5, 0.5, 2*eta_norm_pts)
##Order Parameters
(ops2,op_abs2,op_angle2,op_mask2,
op_angle_mean2,op_angle_std2)=order_param(eta2_norm,eta2_t_norm,op_abs_thresh)'''
mdict = {'bins':bins,
'spike_mat':spike_mat,
'spike_mat_bin':spike_mat_bin,
'spike_fil_bin':spike_fil_bin,
             'spike_fil_bin2':spike_fil_bin2,
'butter_int_bin': butter_int_bin,
'butter_int_bin2': butter_int_bin2,
'cross_correlation': cross_correlation,
'auto_cross_correlation1':auto_cross_correlation1,
'auto_cross_correlation2':auto_cross_correlation2,
'cells_inhib': cells_inhib,
'graph_edges':graph_edges,
'number_of_nodes':number_of_nodes,
'degree_histogram':degree_histogram,
#'phase_lag': phase_lag,
#'pop_correlation': pop_corr,
'time': sim_output['tf'],
'bin_width': bin_width,
'phis' : phis,
'mean_phi': mean_phi,
'variance_angle' : variance_angle,
'chi1' : chi1,
'chi2' : chi2,
'pop_burst_peak1': pop_burst_peak1,
'pop_burst_peak2': pop_burst_peak2
#'op_abs1' : op_abs1,
#'op_angle1' : op_angle1,
#'op_angle_mean1' : op_angle_mean1,
#'op_angle_std1' : op_angle_std1,
#'op_abs2' : op_abs2,
#'op_angle2' : op_angle2,
#'op_angle_mean2' : op_angle_mean2,
#'op_angle_std2' : op_angle_std2
}
return mdict
def main(argv=None):
should_save = True
if argv is None:
argv = sys.argv
else:
should_save = False
(simFn, outFn, trans, sec_flag, spike_thresh, f_sigma, butter_low,
butter_high, bin_width, cutoff, are_volts,peak_order,peak_percentile,eta_norm_pts,op_abs_thresh) = parse_args(argv)
sim_output = scipy.io.loadmat(simFn)
post_dict = run(sim_output,trans,sec_flag,spike_thresh,f_sigma,butter_low,butter_high,bin_width,cutoff,are_volts,peak_order,peak_percentile,eta_norm_pts,op_abs_thresh)
if should_save:
scipy.io.savemat(outFn,post_dict,oned_as ='column')
else:
return post_dict
if __name__ == '__main__':
status = main()
sys.exit(status)
```
#### File: prebotc-graph-model/testing/graph_tests.py
```python
import unittest
import networkx as nx
import os
import numpy as np
import sys
import math
'''
This class allows for testing of graph generation using unit tests and saves them to a log file so it can be ran from the command line
efficiently
'''
class TestGraph(unittest.TestCase):
'''
Constructor that takes the desired properties that are going to be used for testing
Parameters:
testname-This is the name of the testmethod you are calling, this allows for efficient testing and adding using cmd line arguments
graph-This is the graph name that you are looking at
gamma-This is the amount of intra inhibitory
NOTE: Must use the full path name for the grap or it will not be able to find it
'''
def __init__(self,testname,graph_file,kintra,kinter):
super(TestGraph,self).__init__(testname)
self.graph_file = graph_file
self.kintra = kintra
self.kinter = kinter
def setUp(self):
pass
'''
This method tests the connection distribution of the graph generation methods. Since it is impossible to eliminate all the
variance in the distribution due this test is used for graphs with large amounts of neurons in each population >= 10000
this allows for the probability distribution to be much more accurate and allow for accurate comparisons
'''
def test_connectionDistribution(self):
graph = nx.read_gml(self.graph_file)
#Gets the probability distribution for connections
n0 = len([d for n,d in graph.nodes_iter(data=True) if d['respir_area'] == 0])
n1 = len([d for n,d in graph.nodes_iter(data=True) if d['respir_area'] == 1])
area_size = [n0,n1]
pMatE = np.array([ (3.0/(n0-1), 0/n1),
(0/n0, 3.0/(n1-1)) ])
pMatI = np.array([ (self.kintra/(n0-1), (self.kinter)/n1),
((self.kinter)/n0, self.kintra/(n1-1)) ])
#First level list represents area number
#Second level list represents type of neuron index 0 is exict and index 1 is inh
excit_inh_node_count = [[0,0],[0,0]]
#Gets totals for the types of neuron in each population
for i in range(n0 + n1):
if i < n0:
if graph.node[i]['inh'] == 0:
excit_inh_node_count[0][0] += 1
else:
excit_inh_node_count[0][1] += 1
else:
if graph.node[i]['inh'] == 0:
excit_inh_node_count[1][0] += 1
else:
excit_inh_node_count[1][1] += 1
excit_sampled = [[0,0],[0,0]]
inh_sampled = [[0,0],[0,0]]
#Calculates actual observed connectivity
edges = graph.edges()
for e in edges:
source = e[0]
target = e[1]
source_index = 0
target_index = 0
if source < n0:
source_index = 0
else:
source_index = 1
if target < n0:
target_index = 0
else:
target_index = 1
if graph.node[source]['inh'] == 0:
excit_sampled[source_index][target_index] += 1
else:
inh_sampled[source_index][target_index] += 1
excit_prob_sampled = [[-1,-1],[-1,-1]]
inh_prob_sampled = [[-1,-1],[-1,-1]]
#Calculates sample probability for the given connections
for i in range(2):
for j in range(2):
nodes_in_j = area_size[j]
if i == j:
nodes_in_j -= 1
if excit_inh_node_count[i][0] != 0:
excit_prob_sampled[i][j] = float(excit_sampled[i][j]) / (excit_inh_node_count[i][0] * nodes_in_j)
if excit_inh_node_count[i][1] != 0:
inh_prob_sampled[i][j] = float(inh_sampled[i][j]) / (excit_inh_node_count[i][1] * nodes_in_j)
#Calculates the percentage difference between two populations
percentage_diff_excit = (abs(pMatE-excit_prob_sampled) / pMatE) * 100
percentage_diff_inh = (abs(pMatI-inh_prob_sampled) / pMatI) * 100
self.assertTrue(percentage_diff_excit[0][0] <= 5 and percentage_diff_excit[0][1] <= 5 and percentage_diff_excit[1][0] <= 5 and percentage_diff_excit[1][1] <= 5)
self.assertTrue(percentage_diff_inh[0][0] <= 5 and percentage_diff_inh[0][1] <= 5 and percentage_diff_inh[1][0] <= 5 and percentage_diff_inh[1][1] <= 5)
if __name__ == '__main__':
graph = sys.argv[1]
    kintra = float(sys.argv[2])
    kinter = float(sys.argv[3])
output = sys.argv[4]
log_file = output
f = open(log_file,"w")
suite = unittest.TestSuite()
suite.addTest(TestGraph("test_connectionDistribution",graph,kintra,kinter))
unittest.TextTestRunner(f,verbosity=2).run(suite)
f.close()
``` |
{
"source": "joshmeranda/forgehub",
"score": 3
} |
#### File: forgehub/forgehub/__init__.py
```python
import datetime
from forgehub.events import *
from forgehub.git import *
from forgehub.render import *
from argparse import ArgumentParser, FileType, Namespace
import os
import sys
from typing import Optional, Union
from github import AuthenticatedUser, Github, GithubException, NamedUser
import pygit2
GithubUser = Union[NamedUser.NamedUser, AuthenticatedUser.AuthenticatedUser]
def __parse_args() -> Namespace:
# todo: create a new repository rather than using an existing repo
parser = ArgumentParser(
prog="forgehub",
description="Abuse the github activity calendar to draw patterns or write messages",
add_help=True,
)
subparsers = parser.add_subparsers(dest="subcommand", required=True)
# # # # # # # # # # # # # # # # # #
# subcommand write #
# # # # # # # # # # # # # # # # # #
write_parser = subparsers.add_parser(
"write", help="write text to your github activity calendar"
)
write_parser.add_argument(
"repo",
help="either a path to a locally cloned repo, or the url to an upstream repository",
)
write_parser.add_argument(
"-d",
"--dilute",
action="store_true",
help="specify to dilute existing activity by generating even more commits",
)
write_parser.add_argument(
"--user",
help=(
"the name of the target user, if not specified the user is determined by"
"either the user associated with the passed token or the git system / global configs"
),
)
source_group = write_parser.add_mutually_exclusive_group()
source_group.add_argument(
"text",
nargs="?",
help="the text that should be displayed on the github activity calendar",
)
source_group.add_argument(
"-l",
"--load",
type=FileType("r"),
help="load an unscaled data level map from the given file",
)
create_group = write_parser.add_argument_group(
title="creation",
description="arguments controlling if and how a new repository is created",
)
create_group.add_argument(
"-c",
"--create",
action="store_true",
help="create a new local and remote repository with the name given as repo rather than using an existing one (requires an access token)",
)
create_group.add_argument(
"-p",
"--private",
action="store_true",
help="specify that the new repository should be public rather than private (be careful of your activity calendar's 'Private Contributions' setting)",
)
create_group.add_argument(
"-R", "--replace",
action="store_true",
help="if a repository already exists for the authenticated user, delete and replace it (use with caution)",
)
ssh_group = write_parser.add_argument_group(
title="ssh", description="values to use when communicating with github over ssh"
)
ssh_group.add_argument(
"--public-key",
help="the file path of the public ssh key to for ssh operations, `~/.ssh/id_rsa.pub` if not specified",
)
ssh_group.add_argument(
"--private-key",
help="the file path of the private ssh key to for ssh operations, `~/.ssh/id_rsa` if not specified",
)
# not required since we can still perform github queries using public only information
token_group = write_parser.add_mutually_exclusive_group()
token_group.add_argument(
"-t", "--token", help="use the given value as the authenticated access token"
)
token_group.add_argument(
"-F",
"--token-file",
type=FileType("r"),
help="read the token from the given file",
)
behavior_group = write_parser.add_argument_group()
behavior_group.add_argument(
"--no-clean",
action="store_true",
help="do not remove any cloned repositories after commits are pushed",
)
behavior_group.add_argument(
"-n",
"--no-push",
action="store_true",
help="do not push the crafted commits automatically (implies (--no-clean)",
)
    push_group = write_parser.add_argument_group(title="push", description="a group of arguments modifying the default push behavior")
push_group.add_argument(
"--remote", help="the name of the remote to push to (default to origin)", default="origin"
)
push_group.add_argument(
"--branch", help="the name of the branch to push to (defaults to main)", default="main",
)
# # # # # # # # # # # # # # # # # #
# subcommand dump #
# # # # # # # # # # # # # # # # # #
dump_parser = subparsers.add_parser(
"dump", help="dump the DataLevelMap for the given data to a file, or stdout"
)
dump_parser.add_argument("text", help="the text to be rendered and dumped")
dump_parser.add_argument(
"-o", "--out", type=FileType("w"), help="the output file for the DataLevelMap"
)
dump_parser.add_argument(
"-i", "--include-dates", action="store_true", help="include dates in output"
)
return parser.parse_args()
def __get_token(namespace: Namespace) -> Optional[str]:
if namespace.token is not None:
return namespace.token
if namespace.token_file is not None:
return namespace.token_file.readline().rstrip("\n")
return None
def __get_user(namespace: Namespace) -> Optional[GithubUser]:
token = __get_token(namespace)
# todo: we should probably ask the user for username and password if not given (--login / --no-login /
# --interactive?) rather than just carrying on with an unauthenticated client
if token is not None:
github_client = Github(login_or_token=token)
else:
github_client = Github()
if namespace.user is not None:
return github_client.get_user(namespace.user)
if token is not None:
# return the authenticated user
return github_client.get_user()
try:
return github_client.get_user(pygit2.Config.get_system_config()["user.name"])
except (OSError, KeyError):
pass
try:
return github_client.get_user(pygit2.Config.get_xdg_config()["user.name"])
except (OSError, KeyError):
pass
try:
return github_client.get_user(pygit2.Config.get_global_config()["user.name"])
except (OSError, KeyError):
pass
return None
def __get_ssh_keys(namespace: Namespace) -> (str, str):
"""Retrieve the public key, and private key files.
:param namespace: The namespace of command line arguments.
:return: The paths to the private key, and the public key
"""
home = os.getenv("HOME")
if namespace.private_key is None:
private = os.path.join(home, ".ssh", "id_rsa")
else:
private = namespace.private_key
if namespace.public_key is None:
public = os.path.join(home, ".ssh", "id_rsa.pub")
else:
public = namespace.public_key
return private, public
def __repo_name_from_url(url: str) -> str:
return url.split("/")[-1].split(".")[0]
def __parse_data_level_map_from_file(file) -> DataLevelMap:
content: str = file.read().strip()
if content.isdigit():
data_levels = list(map(int, content.split()))
renderer = TextRenderer()
data_level_map = renderer.render_data_levels(data_levels)
else:
data_level_map = DataLevelMap()
for line in content.splitlines():
date, data_level = line.split(":")
date = datetime.datetime.strptime(date, "%Y.%m.%d")
            data_level_map[date] = int(data_level)
return data_level_map
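# Illustrative note (added for clarity, not original code): the loader accepts
# either a dump of raw digit data levels (handed to
# TextRenderer.render_data_levels) or one "YYYY.MM.DD:level" entry per line,
# matching the --include-dates output of the dump subcommand, e.g.
#
#   2023.01.02:4
#   2023.01.03:0
#
# The exact behaviour of DataLevelMap and TextRenderer is an assumption here,
# inferred from how they are used elsewhere in this module.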
def __write(namespace: Namespace) -> int:
print("rendering output...")
if namespace.load is not None:
try:
data_level_map = __parse_data_level_map_from_file(namespace.load)
except Exception as err:
print(f"could not load DataLevelMap from file: {err}")
return 1
else:
text = namespace.text if namespace.text is not None else input()
renderer = TextRenderer()
data_level_map = renderer.render(text.upper())
try:
user = __get_user(namespace)
except GithubException as err:
print(f"an error occurred fetching github user: {err}")
return 1
if user is None:
print("no user could be determined from arguments or environment")
return 1
print(f"retrieving activity for user '{user.login}'...")
_, max_events_per_day = events.get_max_events_per_day(user)
boundaries = events.get_data_level_boundaries(max_events_per_day, namespace.dilute)
data_level_map.scale_to_boundaries(boundaries)
print("initializing repository...")
private, public = __get_ssh_keys(namespace)
repo = namespace.repo
if os.path.exists(repo):
repo_path = repo
repo_upstream = None
else:
repo_path = __repo_name_from_url(repo)
repo_upstream = repo
callbacks = SshRemoteCallbacks(private, public)
try:
with GitDriver() as driver:
if namespace.create:
if not isinstance(user, AuthenticatedUser.AuthenticatedUser):
print("to create a new repository you must provide a token")
return 5
driver.create(namespace.repo, user, callbacks, namespace.replace, namespace.private)
elif repo_upstream is not None:
driver.clone_into(repo_path, repo_upstream, callbacks)
else:
driver.init_repo(repo_path)
print("forging commits...")
driver.forge_commits(data_level_map)
print("pushing to upstream...")
ref_specs = [f"refs/heads/{namespace.branch}"]
driver.push(remote_name=namespace.remote, ref_specs=ref_specs, push_callbacks=callbacks)
except DriverInitError as err:
print(f"error initializing repository: {err}")
return 2
except DriverForgeError as err:
print(f"error forging commits: {err}")
return 3
except DriverPushError as err:
print(f"error pushing to upstream: {err}")
return 4
return 0
def __dump(namespace: Namespace) -> int:
renderer = render.TextRenderer()
raw_data_level_map = renderer.render(namespace.text.upper())
if namespace.out is None:
out_file = sys.stdout
else:
out_file = namespace.out
try:
if namespace.include_dates:
out_file.write(
"\n".join(
map(
lambda pair: f"{pair[0].strftime('%Y.%m.%d')}:{pair[1]}",
raw_data_level_map.items(),
)
)
)
else:
# sort the data levels by date and write to output
out_file.write(
"".join(
[
str(i[1])
for i in sorted(
raw_data_level_map.items(), key=lambda i: i[0], reverse=True
)
]
)
)
except IOError as err:
print(f"could not write to output: {err}")
return 1
if namespace.out is not None:
out_file.close()
return 0
def main():
namespace = __parse_args()
match namespace.subcommand:
case "write":
exit_code = __write(namespace)
case "dump":
exit_code = __dump(namespace)
case _:
exit_code = 5
sys.exit(exit_code)
``` |
{
"source": "joshmeranda/undo",
"score": 3
} |
#### File: undo/tests/test_expand.py
```python
import unittest
from undo import expand
join_expanded = expand.__join_expanded
separate = expand.__separate
class TestJoinExpanded(unittest.TestCase):
def test_join_no_list(self):
expected = ["mv INNER OUTER"]
actual = join_expanded(["mv ", "INNER", " ", "OUTER"])
self.assertListEqual(expected, actual)
def test_expanded_single_list(self):
expected = ["list a", "list b"]
actual = join_expanded(["list ", ["a", "b"]])
self.assertEqual(expected, actual)
def test_expanded_multiple_list(self):
expected = ["a b", "c d"]
actual = join_expanded([["a", "c"], " ", ["b", "d"]])
self.assertEqual(expected, actual)
class TestSeparation(unittest.TestCase):
def test_separation_basic(self):
content = "$( 'hello' ) world"
expected = ["$( 'hello' )", " world"]
actual = separate(content, ("$(", ")"))
self.assertListEqual(expected, actual)
def test_nested(self):
content = "$(\"$(basename('/some/file/path/hello')\") world"
expected = ["$(\"$(basename('/some/file/path/hello')\")", " world"]
actual = separate(content, ("$(", ")"))
self.assertListEqual(expected, actual)
def test_multiple_expressions_with_same_open_and_close_bounds(self):
content = "% 'hello' % % 'world' %"
expected = ["% 'hello' %", " ", "% 'world' %"]
actual = separate(content, ("%", "%"))
self.assertListEqual(expected, actual)
def test_unintended_close_bound(self):
content = "$(do_something('hello'))"
expected = ["$(do_something('hello')", ")"]
actual = separate(content, ("$(", ")"))
self.assertListEqual(expected, actual)
@unittest.skip("not yet implemented")
def test_escaped_open_bound(self):
content = "\\$( $(hello world)"
expected = ["$( ", "%(hello world)"]
actual = separate(content, ("$(", ")"))
self.assertListEqual(expected, actual)
@unittest.skip("not yet implemented")
def test_escaped_close_bound(self):
content = "$(hello world \\))"
expected = ["%(hello world)", ")"]
actual = separate(content, ("$(", ")"))
self.assertListEqual(expected, actual)
class TestExpansion(unittest.TestCase):
def test_no_expansion(self):
expected = "no expansion"
actual = expand.expand("no expansion", dict(), ("%", "%"), None)
self.assertEqual(expected, actual)
def test_leading_expansion(self):
expected = "hello world"
actual = expand.expand("% 'hello' % world", dict(), ("%", "%"), None)
self.assertEqual(expected, actual)
def test_trailing_expansion(self):
expected = "hello world"
actual = expand.expand("hello % 'world' %", dict(), ("%", "%"), None)
self.assertEqual(expected, actual)
def test_no_space_prefix(self):
expected = "aA"
actual = expand.expand("a% 'A' %", dict(), ("%", "%"), None)
self.assertEqual(expected, actual)
def test_middling_expansion(self):
expected = "rm --verbose --recursive"
actual = expand.expand("rm % VERBOSE ? '--verbose' % --recursive", {"VERBOSE": str(True)}, ("%", "%"), None)
self.assertEqual(expected, actual)
def test_back_to_back_expansion(self):
expected = "FirstSecond"
actual = expand.expand("% 'First' %% 'Second' %", dict(), ("%", "%"), None)
self.assertEqual(expected, actual)
def test_separated_expressions(self):
expected = "First something Second"
actual = expand.expand("% 'First' % something % 'Second' %", dict(), ("%", "%"), None)
self.assertEqual(expected, actual)
def test_list_expansion_expression(self):
expected = "list a b c"
actual = expand.expand("list % $LIST... %", {"LIST": ['a', 'b', 'c']}, ("%", "%"), None)
self.assertEqual(expected, actual)
def test_no_list_expansion(self):
expected = "list a; list b; list c"
actual = expand.expand("list % $LIST %", {"LIST": ['a', 'b', 'c']}, ("%", "%"), "; ")
self.assertEqual(expected, actual)
def test_multiple_no_list_expansion(self):
expected = "list a d; list b e; list c f"
actual = expand.expand("list % $LISTA % % $LISTB %", {
"LISTA": ['a', 'b', 'c'],
"LISTB": ['d', 'e', 'f'],
}, ("%", "%"), "; ")
self.assertEqual(expected, actual)
def test_multiple_no_list_expansion_no_sep(self):
expected = "list a d; list b e; list c f"
expected = [
"list a d",
"list b e",
"list c f",
]
actual = expand.expand("list % $LISTA % % $LISTB %", {
"LISTA": ['a', 'b', 'c'],
"LISTB": ['d', 'e', 'f'],
}, ("%", "%"), None)
self.assertEqual(expected, actual)
def test_multiple_unique_no_list_expansion(self):
with self.assertRaises(ValueError):
expand.expand("% $LISTA % % $LISTB %", {
"LISTA": ['a', 'b', 'c'],
"LISTB": ['d', 'e', 'f', 'g'],
}, ("%", "%"), None)
if __name__ == "__main__":
unittest.main()
```
#### File: undo/tests/test_history.py
```python
import unittest
import io
from undo import history
class TestFishHistory(unittest.TestCase):
def test_get_most_recent(self):
commands = ["d",
"c",
"b",
"a",
"ignore.me"]
stream = io.StringIO("\n".join(commands))
expected = ["a"]
actual = history.history(shell="fish", stream=stream)
self.assertListEqual(expected, actual)
def test_get_n_most_recent(self):
commands = ["d",
"c",
"b",
"a",
"ignore.me"]
stream = io.StringIO("\n".join(commands))
expected = commands[:-1]
actual = history.history("fish", 4, stream)
self.assertListEqual(expected, actual)
def test_get_overflow(self):
commands = ["d",
"c",
"b",
"a",
"ignore.me"]
stream = io.StringIO("\n".join(commands))
expected = commands[:-1]
actual = history.history("fish", 10, stream)
self.assertListEqual(expected, actual)
class TestShHistory(unittest.TestCase):
def test_get_most_recent(self):
commands = [" 1 d",
" 2 c",
" 3 b",
" 4 a",
" 5 ignore.me"]
stream = io.StringIO("\n".join(commands))
expected = ["a"]
actual = history.history(shell="sh", stream=stream)
self.assertListEqual(expected, actual)
def test_get_n_most_recent(self):
commands = [" 1 d",
" 2 c",
" 3 b",
" 4 a",
" 5 ignore.me"]
stream = io.StringIO("\n".join(commands))
expected = ["d", "c", "b", "a"]
actual = history.history("sh", 4, stream)
self.assertListEqual(expected, actual)
def test_get_overflow(self):
commands = [" 1 d",
" 2 c",
" 3 b",
" 4 a",
" 5 ignore.me"]
stream = io.StringIO("\n".join(commands))
expected = ["d", "c", "b", "a"]
actual = history.history("sh", 10, stream)
self.assertListEqual(expected, actual)
if __name__ == "__main__":
unittest.main()
```
#### File: test_undos/test_coreutils/test_install.py
```python
import os
import shutil
import unittest
from undo import resolve, expand
from tests.test_undos.test_coreutils import common
class TestInstall(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
if os.path.exists(common.COREUTILS_TEST_ENV_DIR):
shutil.rmtree(common.COREUTILS_TEST_ENV_DIR)
os.mkdir(common.COREUTILS_TEST_ENV_DIR)
os.mkdir(os.path.join(
common.COREUTILS_TEST_ENV_DIR,
"DIR"
))
cwd_bak = os.getcwd()
os.chdir(common.COREUTILS_TEST_ENV_DIR)
cls.addClassCleanup(shutil.rmtree, common.COREUTILS_TEST_ENV_DIR)
cls.addClassCleanup(os.chdir, cwd_bak)
def test_install_file(self):
command = "install SRC DIR/DST"
expected = []
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, False, "sh")]
self.assertListEqual(expected, actual)
expected = ["rm DIR/DST"]
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, True, "sh")]
self.assertListEqual(expected, actual)
def test_install_file_no_target_directory(self):
command = "install -T SRC DIR/DST"
expected = []
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, False, "sh")]
self.assertListEqual(expected, actual)
expected = ["rm DIR/DST"]
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, True, "sh")]
self.assertListEqual(expected, actual)
def test_install_single(self):
command = "install SRC DIR"
expected = []
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, False, "sh")]
self.assertListEqual(expected, actual)
expected = ["rm DIR/SRC"]
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, True, "sh")]
self.assertListEqual(expected, actual)
def test_install_multiple(self):
command = "install A B C DIR"
expected = []
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, False, "sh")]
self.assertListEqual(expected, actual)
expected = ["rm DIR/A DIR/B DIR/C"]
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, True, "sh")]
self.assertListEqual(expected, actual)
def test_install_single_target_directory(self):
command = "install -t DIR SRC"
expected = []
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, False, "sh")]
self.assertListEqual(expected, actual)
expected = ["rm DIR/SRC"]
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, True, "sh")]
self.assertListEqual(expected, actual)
def test_install_multiple_target_directory(self):
command = "install -t DIR A B C"
expected = []
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, False, "sh")]
self.assertListEqual(expected, actual)
expected = ["rm DIR/A DIR/B DIR/C"]
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, True, "sh")]
self.assertListEqual(expected, actual)
def test_install_directory(self):
command = "install -d A B C"
expected = []
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, False, "sh")]
self.assertListEqual(expected, actual)
expected = ["rm --recursive A B C"]
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, True, "sh")]
self.assertListEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
```
#### File: undo/undo/expand.py
```python
import logging
import re
import typing
from undo import expression
def __join_expanded(expanded: list[typing.Union[str, list[str]]]) -> list[str]:
"""Join the expanded items into a single or multiple commands.
:param expanded: the expanded items to join.
    :return: the list of joined command strings.
"""
list_values = [(i, val) for i, val in enumerate(expanded) if isinstance(val, list)]
if len(list_values) == 0:
return ["".join(expanded)]
initial_len = len(list_values[0][1]) if list_values else None
if not all(len(i) == initial_len for _, i in list_values[1::]):
raise ValueError("not all non-expanded list are of the same size")
pairs = zip(*[[(i, j) for j in val] for i, val in list_values])
result = list()
for pair in pairs:
cc = expanded.copy()
for i, v in pair:
del(cc[i])
cc.insert(i, v)
result.append("".join(cc))
return result
def __find_matching_closing_bound(content: str, head: int, open_bound: str, close_bound: str) -> int:
depth = 0
while head < len(content):
if content[head::].startswith(open_bound) and not (open_bound == close_bound and depth != 0):
depth += 1
head += len(open_bound)
elif content[head::].startswith(close_bound) and content[head - 1] != "\\":
depth -= 1
head += len(close_bound)
if depth == 0:
return head
else:
head += 1
return head
def __separate(content: str, bounds: tuple[str, str]) -> list[str]:
open_bound = bounds[0]
close_bound = bounds[1]
result = list()
last = 0
head = 0
while head < len(content):
if content[head::].startswith(open_bound) and content[head - 1] != "\\":
if head != last:
result.append(content[last:head:])
last = head
head = __find_matching_closing_bound(content, head, open_bound, close_bound)
result.append(content[last:head:])
last = head
else:
head += 1
if head != last:
result.append(content[last:head:])
return result
def expand(undo: str, env: dict[str, typing.Union[str, list[str]]], bounds: tuple[str, str],
command_sep: typing.Optional[str]) -> typing.Union[str, list[str]]:
"""Expand a string containing 0 or more UndoExpressions in them using the given environment.
:param undo: the undo pattern to expand.
:param env: the dictionary containing the values to use for evaluating undo expressions.
:param bounds: the bounds around an expressions.
:param command_sep: the join delimiter to use if expansion results in a string.
:return: if command_sep is not None or only one command is expanded, then a string of the one or more expanded
commands join on command-sep. Otherwise the list of expanded commands.
:raise ValueError: for any error with bad syntax or format.
"""
if undo.count("%") % 2 != 0:
raise ValueError(f"unbalanced '%' in : {undo}")
expr_regex = rf"{re.escape(bounds[0])}.*?{re.escape(bounds[1])}"
splits = __separate(undo, bounds)
expanded = list()
for i in splits:
if re.fullmatch(expr_regex, i):
expr = expression.parse(i.removeprefix(bounds[0]).removesuffix(bounds[1]).strip())
if isinstance(expr, expression.ValueExpression):
expanded.append(expr.evaluate(env))
else:
logging.error(f"expected a string value but found a boolean: '{i}'")
else:
expanded.append(i)
command = __join_expanded(expanded)
if len(command) == 1:
return command[0]
if command_sep is not None:
return command_sep.join(command)
return command
```
#### File: undo/pattern/pattern.py
```python
import dataclasses
import enum
import re
import typing
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Errors #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class PatternError(ValueError):
"""Raise for any error when parsing patterns."""
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Objects #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class Quantifier(enum.Enum):
FLAG = enum.auto()
OPTIONAL = enum.auto()
AT_LEAST_ONE = enum.auto()
ANY = enum.auto()
N = enum.auto()
class ArgNum:
def __init__(self, quantifier: Quantifier, count: typing.Optional[int] = None):
"""Describes the amount of values for a command argument.
:param quantifier: describes the type of value quantity.
:param count: the optional amount of arguments (defaults to None), if quantifier is not Quantifier.N this
parameter is ignored.
        :raises ValueError: if quantifier is Quantifier.N and count is None or negative.
"""
self.quantifier = quantifier
if self.quantifier == Quantifier.N:
if count is None:
raise ValueError("'count' must not be None when quantifier is N")
if count < 0:
raise ValueError("'count' must be >= 0 but was '{count}'")
self.count = count
else:
self.count = None
def __repr__(self):
return f"{type(self).__name__}({', '.join([f'{name}: {repr(value)}' for name, value in vars(self).items() if name[0] != '_'])})"
def __eq__(self, other) -> bool:
return (isinstance(other, ArgNum)
and self.quantifier == other.quantifier
and self.count == other.count)
@dataclasses.dataclass
class ArgumentPattern:
# if var_name is optional, it should be assigned in order from 1 - n in the calling method / class
var_name: typing.Optional[str]
arg_num: typing.Union[ArgNum, int]
args: list[str]
is_positional: bool
is_required: bool
# the delim to use when splitting a list argument into each list element
delim: typing.Optional[str]
@dataclasses.dataclass
class ArgumentGroupPattern:
is_required: bool
args: list[ArgumentPattern]
@dataclasses.dataclass
class CommandPattern:
command: str
sub_commands: list[str]
arguments: list[ArgumentPattern]
groups: list[ArgumentGroupPattern]
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Regex #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
__COMMAND_REGEX = re.compile("([a-zA-Z0-9_-]*)")
__IDENTIFIER_REGEX = re.compile("[a-zA-Z_0-9]+")
__QUANTIFIER_REGEX = re.compile(r"\.\.\.|"
r"\*|"
r"{([0-9]+)}")
__SHORT_REGEX = r"-[a-zA-Z0-9]"
__LONG_REGEX = r"--[a-zA-Z0-9][a-zA-Z0-9]+(-[a-zA-Z0-9][a-zA-Z0-9]+)*"
__ARG_REGEX = re.compile(rf"{__SHORT_REGEX}|"
rf"{__LONG_REGEX}")
__DELIM_REGEX = re.compile(r":(.*)[\]>]")
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Parsing #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def __parse_names(content: str) -> (list[str], int):
"""Parse the list of argument names leaving the leading '-' in tact.
todo: test long names with only one dash (ex. find -name)
"""
offset = 0
names = list()
while offset < len(content) and content[offset] not in "[=:]>":
if content[offset].isspace():
offset += 1
continue
m = __ARG_REGEX.match(content[offset::])
if m is None:
break
names.append(m.string[m.start(): m.end()])
offset += m.end()
return names, offset
def __parse_arg_num(content: str, is_optional: bool, is_flag: bool) -> (ArgNum, int):
"""Parse the quantifier for the argument."""
match = __QUANTIFIER_REGEX.match(content)
n = None
offset = 0
if match is None:
if is_flag:
quantifier = Quantifier.FLAG
elif is_optional:
quantifier = Quantifier.OPTIONAL
else:
quantifier = Quantifier.N
n = 1
else:
body = match.string[:match.end()]
if body == "...":
quantifier = Quantifier.ANY if is_optional else Quantifier.AT_LEAST_ONE
offset = 3
elif body == "*":
quantifier = Quantifier.ANY
offset = 1
elif body[0] == "{":
try:
n = int(match.group(1))
except ValueError as err:
raise PatternError(f"bad quantifier: {err}")
if n == 0:
quantifier = Quantifier.FLAG
n = None
else:
quantifier = Quantifier.N
offset = match.end()
else:
raise PatternError(f"unknown quantifier found: '{match.string[:match.end()]}")
if is_optional and (quantifier == Quantifier.N and n != 1):
raise PatternError("optional argument values must only have a Quantifier of 1")
return ArgNum(quantifier, n), offset
def __parse_var(content: str, is_positional: bool) -> (typing.Optional[str], ArgNum, int):
"""Parse the argument's meta var and argument count."""
if content[0] in "]>":
return (None,
ArgNum(Quantifier.N, 1) if is_positional else ArgNum(Quantifier.FLAG),
0)
offset = 0
has_brace = False
has_equal = False
if content[offset] == "[":
has_brace = True
offset += 1
if content[offset] == "=":
has_equal = True
offset += 1
if not is_positional and not has_brace and not has_equal:
raise PatternError(f"non-positional arguments with quantifier != 1 must have either '[', '=', or '[=' but found"
f"'{content[offset]}'")
if (match := __IDENTIFIER_REGEX.match(content[offset::])) is not None:
ident = match.string[:match.end():]
offset += match.end()
else:
ident = None
arg_num, size = __parse_arg_num(content[offset::],
(has_equal or is_positional) and has_brace,
not has_equal and not is_positional)
offset += size
if has_brace and content[offset] != "]":
raise PatternError(f"expected ']' but found '{content[offset]}")
elif has_brace:
offset += 1
return ident, arg_num, offset
def __parse_delim(content: str) -> (typing.Optional[str], int):
"""Parse a list delimiter from the given str."""
match = __DELIM_REGEX.match(content)
if match is None:
return None, 0
delim = match.group(1)
offset = len(delim) + 1 # length of delimiter + initial ':'
return delim if delim else None, offset
def parse_argument_pattern(content: str) -> (ArgumentPattern, int):
"""Attempt to parse an ArgumentPattern from a str.
Note: expects to receive the surrounding bracket (ie "[-d --dir]" not "-d --dir")
Grammar:
OPEN_BRACE := '[' | '<'
CLOSE_BRACE := ']' | '>'
IDENTIFIER := [A-Za-z_]+
SHORT := '-[a-zA-Z0-9]'
LONG := '--[a-zA-Z][a-zA-Z-]*'
N := '{' [0-9]+ '}'
DELIM := ':' + .*
        PATTERN := OPEN_BRACE (SHORT | LONG)* '['? '='? IDENT? N? ']' DELIM? CLOSE_BRACE
:param content: the string to parse.
:return: the parsed ArgumentPattern if successful.
"""
if len(content) == 0:
raise PatternError("content may not be empty")
if content[0] not in "[<" or content[-1] not in "]>":
raise PatternError("argument pattern must be wrapped in '[ ]' or '< >'")
open_brace = content[0]
is_required = open_brace == "<"
offset = 1
names, size = __parse_names(content[offset::])
offset += size
is_positional = len(names) == 0
ident, arg_num, size = __parse_var(content[offset::], is_positional)
offset += size
delim, size = __parse_delim(content[offset::])
offset += size
if (delim is not None and not (arg_num.quantifier == Quantifier.N and arg_num.count == 1
or arg_num.quantifier == Quantifier.OPTIONAL)):
raise PatternError(f"Only arguments taking 1 or optional values may specify a delimiter")
try:
if (close_brace := content[offset]) in "]>":
if open_brace == "<" and close_brace != ">" or open_brace == "[" and close_brace != "]":
raise PatternError(f"mismatching brace types, found '{open_brace}' and '{close_brace}'")
offset += 1
else:
raise PatternError(f"expected '{']' if open_brace == '[' else '>'}' but found '{content[offset]}")
except IndexError as err:
raise PatternError(f"error parsing arguments pattern: {err}")
if is_positional and not is_required:
raise PatternError("a positional argument may not be optional, you may specify either '?' or '*' as quantifiers")
if ident is None and len(names) > 0:
ident = (max(names, key=lambda l: len(l)).lstrip('-')
.upper().replace("-", "_"))
return ArgumentPattern(ident, arg_num, names, is_positional, is_required, delim), offset
def parse_argument_group_pattern(content: str) -> (ArgumentGroupPattern, int):
"""Attempt to parse an ArgumentGroup from a str.
    Note: expects to receive the surrounding parentheses.
Basic grammar:
EXCLUSIVE := '!'
GROUP_PATTERN := '(' EXCLUSIVE? ARGUMENT_PATTERN+ ')'
:param content: the content to parse.
:return: The parsed ArgumentGroupPattern and the offset pointing to the next character in content.
"""
if len(content) == 0:
raise PatternError("content may not be empty")
if content[0] != "(" or content[-1] != ")":
raise PatternError("argument group pattern must be wrapped '( )'")
if content[1] == "!":
is_required = True
offset = 2
else:
is_required = False
offset = 1
arguments = list()
while offset < len(content) - 1:
if content[offset].isspace():
offset += 1
continue
arg_match = re.match(r"([\[<].*[]>])",
content[offset::])
if arg_match is not None:
arg, size = parse_argument_pattern(arg_match.group(1))
offset += size
if arg.is_required:
raise PatternError(f"argument group patterns may not contain required arguments")
arguments.append(arg)
continue
else:
raise PatternError(f"could not parse arguments in group '{content}'")
offset += 1
return ArgumentGroupPattern(is_required, arguments), offset
def parse_commands(content: str) -> (str, list[str], int):
"""Parse the command and sub_commands from the given content.
todo: consider supporting command aliases without copy and pasting entire command patterns
    :param content: the content to parse.
:return: the parsed command, and the index of the next meaningful character in the string.
"""
cmd_match = re.match(rf"\s*([a-zA-Z0-9_-]*)\s*", content)
sub_cmd_match = re.match(r"((?:[a-zA-Z0-9_-]*\s*)*)\s*", content[cmd_match.end()::])
command = cmd_match.group(1)
sub_commands = sub_cmd_match.group(1).split()
offset = cmd_match.end() + sub_cmd_match.end()
return command, sub_commands, offset
def parse_command_pattern(content: str) -> CommandPattern:
"""Attempt to parse a CommandPattern from a str.
:param content: the content to parse.
:return: the parsed CommandPattern.
:raise: PatternError on any error parsing input.
"""
if len(content) == 0:
raise PatternError("content may not be empty")
command, sub_commands, offset = parse_commands(content)
arguments = list()
groups = list()
while offset < len(content):
if content[offset].isspace():
offset += 1
continue
# check for an argument
arg_match = re.match(r"([\[<].*[]>])",
content[offset::])
if arg_match is not None:
arg, size = parse_argument_pattern(arg_match.group(1))
offset += size
arguments.append(arg)
continue
group_match = re.match(r"(\(.*?\))",
content[offset::])
if group_match is not None:
group, size = parse_argument_group_pattern(group_match.group(1))
offset += size
groups.append(group)
continue
raise PatternError(f"unexpected value '{content[offset]}'")
return CommandPattern(command, sub_commands, arguments, groups)
``` |
{
"source": "joshmgrant/datasette",
"score": 3
} |
#### File: datasette/datasette/filters.py
```python
import json
import numbers
from .utils import detect_json1, escape_sqlite
class Filter:
key = None
display = None
no_argument = False
def where_clause(self, table, column, value, param_counter):
raise NotImplementedError
def human_clause(self, column, value):
raise NotImplementedError
class TemplatedFilter(Filter):
def __init__(
self,
key,
display,
sql_template,
human_template,
format="{}",
numeric=False,
no_argument=False,
):
self.key = key
self.display = display
self.sql_template = sql_template
self.human_template = human_template
self.format = format
self.numeric = numeric
self.no_argument = no_argument
def where_clause(self, table, column, value, param_counter):
converted = self.format.format(value)
if self.numeric and converted.isdigit():
converted = int(converted)
if self.no_argument:
kwargs = {"c": column}
converted = None
else:
kwargs = {"c": column, "p": "p{}".format(param_counter), "t": table}
return self.sql_template.format(**kwargs), converted
def human_clause(self, column, value):
if callable(self.human_template):
template = self.human_template(column, value)
else:
template = self.human_template
if self.no_argument:
return template.format(c=column)
else:
return template.format(c=column, v=value)
class InFilter(Filter):
key = "in"
display = "in"
def split_value(self, value):
if value.startswith("["):
return json.loads(value)
else:
return [v.strip() for v in value.split(",")]
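    # Examples: split_value("1, 2, 3") -> ["1", "2", "3"], while the JSON form
    # split_value('["a,b", "c"]') -> ["a,b", "c"] preserves embedded commas.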
def where_clause(self, table, column, value, param_counter):
values = self.split_value(value)
params = [":p{}".format(param_counter + i) for i in range(len(values))]
sql = "{} in ({})".format(escape_sqlite(column), ", ".join(params))
return sql, values
def human_clause(self, column, value):
return "{} in {}".format(column, json.dumps(self.split_value(value)))
class NotInFilter(InFilter):
key = "notin"
display = "not in"
def where_clause(self, table, column, value, param_counter):
values = self.split_value(value)
params = [":p{}".format(param_counter + i) for i in range(len(values))]
sql = "{} not in ({})".format(escape_sqlite(column), ", ".join(params))
return sql, values
def human_clause(self, column, value):
return "{} not in {}".format(column, json.dumps(self.split_value(value)))
class Filters:
_filters = (
[
# key, display, sql_template, human_template, format=, numeric=, no_argument=
TemplatedFilter(
"exact",
"=",
'"{c}" = :{p}',
lambda c, v: "{c} = {v}" if v.isdigit() else '{c} = "{v}"',
),
TemplatedFilter(
"not",
"!=",
'"{c}" != :{p}',
lambda c, v: "{c} != {v}" if v.isdigit() else '{c} != "{v}"',
),
TemplatedFilter(
"contains",
"contains",
'"{c}" like :{p}',
'{c} contains "{v}"',
format="%{}%",
),
TemplatedFilter(
"endswith",
"ends with",
'"{c}" like :{p}',
'{c} ends with "{v}"',
format="%{}",
),
TemplatedFilter(
"startswith",
"starts with",
'"{c}" like :{p}',
'{c} starts with "{v}"',
format="{}%",
),
TemplatedFilter("gt", ">", '"{c}" > :{p}', "{c} > {v}", numeric=True),
TemplatedFilter(
"gte", "\u2265", '"{c}" >= :{p}', "{c} \u2265 {v}", numeric=True
),
TemplatedFilter("lt", "<", '"{c}" < :{p}', "{c} < {v}", numeric=True),
TemplatedFilter(
"lte", "\u2264", '"{c}" <= :{p}', "{c} \u2264 {v}", numeric=True
),
TemplatedFilter("like", "like", '"{c}" like :{p}', '{c} like "{v}"'),
TemplatedFilter(
"notlike", "not like", '"{c}" not like :{p}', '{c} not like "{v}"'
),
TemplatedFilter("glob", "glob", '"{c}" glob :{p}', '{c} glob "{v}"'),
InFilter(),
NotInFilter(),
]
+ (
[
TemplatedFilter(
"arraycontains",
"array contains",
"""rowid in (
select {t}.rowid from {t}, json_each({t}.{c}) j
where j.value = :{p}
)""",
'{c} contains "{v}"',
)
]
if detect_json1()
else []
)
+ [
TemplatedFilter(
"date", "date", 'date("{c}") = :{p}', '"{c}" is on date {v}'
),
TemplatedFilter(
"isnull", "is null", '"{c}" is null', "{c} is null", no_argument=True
),
TemplatedFilter(
"notnull",
"is not null",
'"{c}" is not null',
"{c} is not null",
no_argument=True,
),
TemplatedFilter(
"isblank",
"is blank",
'("{c}" is null or "{c}" = "")',
"{c} is blank",
no_argument=True,
),
TemplatedFilter(
"notblank",
"is not blank",
'("{c}" is not null and "{c}" != "")',
"{c} is not blank",
no_argument=True,
),
]
)
_filters_by_key = {f.key: f for f in _filters}
def __init__(self, pairs, units={}, ureg=None):
self.pairs = pairs
self.units = units
self.ureg = ureg
def lookups(self):
"Yields (lookup, display, no_argument) pairs"
for filter in self._filters:
yield filter.key, filter.display, filter.no_argument
def human_description_en(self, extra=None):
bits = []
if extra:
bits.extend(extra)
for column, lookup, value in self.selections():
filter = self._filters_by_key.get(lookup, None)
if filter:
bits.append(filter.human_clause(column, value))
# Comma separated, with an ' and ' at the end
and_bits = []
commas, tail = bits[:-1], bits[-1:]
if commas:
and_bits.append(", ".join(commas))
if tail:
and_bits.append(tail[0])
s = " and ".join(and_bits)
if not s:
return ""
return "where {}".format(s)
def selections(self):
"Yields (column, lookup, value) tuples"
for key, value in self.pairs:
if "__" in key:
column, lookup = key.rsplit("__", 1)
else:
column = key
lookup = "exact"
yield column, lookup, value
def has_selections(self):
return bool(self.pairs)
def convert_unit(self, column, value):
"If the user has provided a unit in the query, convert it into the column unit, if present."
if column not in self.units:
return value
# Try to interpret the value as a unit
value = self.ureg(value)
if isinstance(value, numbers.Number):
# It's just a bare number, assume it's the column unit
return value
column_unit = self.ureg(self.units[column])
return value.to(column_unit).magnitude
def build_where_clauses(self, table):
sql_bits = []
params = {}
i = 0
for column, lookup, value in self.selections():
filter = self._filters_by_key.get(lookup, None)
if filter:
sql_bit, param = filter.where_clause(
table, column, self.convert_unit(column, value), i
)
sql_bits.append(sql_bit)
if param is not None:
if not isinstance(param, list):
param = [param]
for individual_param in param:
param_id = "p{}".format(i)
params[param_id] = individual_param
i += 1
return sql_bits, params
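# Rough usage sketch (values are hypothetical, not from this repository's tests):
#   f = Filters([("age__gt", "30")])
#   f.build_where_clauses("people")  -> (['"age" > :p0'], {'p0': 30})
#   f.human_description_en()         -> 'where age > 30'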
``` |
{
"source": "joshmgrant/pybay-pytest-the-awesome-parts-code",
"score": 2
} |
#### File: joshmgrant/pybay-pytest-the-awesome-parts-code/test_app.py
```python
import pytest
from selenium import webdriver
@pytest.fixture
def browser():
d = webdriver.Firefox()
yield d
d.quit()
def test_message_appears(browser):
browser.get("http://localhost:3000")
browser.find_element_by_class_name('scale-type-c').send_keys('0')
assert browser.find_element_by_class_name('temperatureMesssage').is_displayed()
def test_message_is_correct(browser):
browser.get("http://localhost:3000")
browser.find_element_by_class_name('scale-type-c').send_keys('0')
assert browser.find_element_by_class_name('temperatureMesssage').text == '0 Celsius is 32 Fahrenheit'
```
#### File: joshmgrant/pybay-pytest-the-awesome-parts-code/test_calculations.py
```python
from calculations import TemperatureConverter
def test_freezing_fahrenheit():
converter = TemperatureConverter()
actual = converter.to_celsius(32.0)
expected = 0.0
assert abs(expected - actual) < 0.01
def test_freezing_celsius():
converter = TemperatureConverter()
actual = converter.to_fahrenheit(0)
expected = 32.0
assert abs(expected - actual) < 0.01
``` |
{
"source": "joshmgrant/Python-Pytest-Nerodia",
"score": 3
} |
#### File: Python-Pytest-Nerodia/tests/test_invalid_login.py
```python
import pytest
def test_locked_out_user(browser):
browser.goto('http://www.saucedemo.com')
browser.text_field(data_test='username').value = 'locked_out_user'
browser.text_field(data_test='password').value ='<PASSWORD>'
browser.button(type='submit').click()
assert browser.button(class_name='error-button').exists
```
#### File: Python-Pytest-Nerodia/tests/test_remove_item_from_cart.py
```python
import pytest
def test_removes_one(browser):
browser.goto('www.saucedemo.com/inventory.html')
browser.button(class_name='add-to-cart-button').click()
browser.button(class_name='add-to-cart-button').click()
browser.button(class_name='remove-from-cart-button').click()
assert browser.span(class_name='shopping_cart_badge').text == '1'
browser.goto("https://www.saucedemo.com/cart.html")
assert len(browser.divs(class_name='inventory_item_name')) == 1
``` |
{
"source": "joshmgrant/sauce-api-testing-example",
"score": 2
} |
#### File: sauce-api-testing-example/tests/test_entries.py
```python
import pytest
post_testdata = [{
'title': "A Sauce",
'description': 'Some kind of hot sauce',
'heat_level': 'medium'
},{
'title': "Sauce with No Description",
'description': '',
'heat_level': 'mild'
}, {
'title': "Sauce with No Heat Level",
'description': 'A pretty good sauce',
'heat_level': ''
}, {
'title': "Sauce with Nothing",
'description': '',
'heat_level': ''
}, {
'title': "",
'description': '',
'heat_level': ''
}]
def test_entries_get(saucelog):
response = saucelog.get("/entries")
assert response.status_code_is(200)
@pytest.mark.parametrize("example_post", post_testdata)
def test_entries_post(saucelog, example_post):
response = saucelog.post("/entries", data=example_post)
assert response.status_code_is(200)
``` |
{
"source": "joshmgrant/svp",
"score": 2
} |
#### File: joshmgrant/svp/conftest.py
```python
import pytest
from samples.corsica_api import CorsicaAPI
from svp.api_object import APIAssert
@pytest.fixture(scope="function")
def api(request):
yield CorsicaAPI()
@pytest.fixture(scope="function")
def api_assert(request):
yield APIAssert()
``` |
{
"source": "joshmiller17/context-injection",
"score": 3
} |
#### File: joshmiller17/context-injection/main.py
```python
from __future__ import print_function
from __future__ import division
import pickle
# init global args
args = None
# find subdirectories
def get_subdirs(dir):
return [d for d in os.listdir(dir)
if os.path.isdir(os.path.join(dir, d))]
# build the readability model using a directory of labeled text files
def build_readability_model():
debug = args.debug
read_dir = args.read
verbose = args.verbose
file_count = 0
file_names = []
subdirs = get_subdirs(read_dir)
if not subdirs:
print("ERROR: no subdirectories found in " + read_dir)
return None
elif debug:
print("DEBUG: list of subdirectories is ", subdirs)
labels = []
for category in subdirs:
label = -1
while label < 1 or label > 100:
            label = int(input("Please input a readability value for " + category + " from 1 (easy) to 100 (difficult)\n >"))
for dirpath, dirnames, filenames in os.walk(os.path.join(read_dir, category)):
for file in filenames:
if file.endswith(".txt"):
file_names.append(os.path.join(dirpath, file))
labels.append(label)
else:
if debug:
print("DEBUG: " + file + " is not a .txt file")
assert len(labels) == len(file_names)
data, model, weights = readability.construct_readability_model(file_names, labels, verbose=args.verbose)
print("INFO: Readability model constructed.")
if verbose:
readability.print_features(model)
return model
# identifies a list of jargon terms using TFIDF
# returns list
def find_jargon_using_tfidf():
input_doc = args.input
background_dir = args.bg
max_terms = args.maxterms
stem = not args.nostem
debug = args.debug
#if background is not provided, load saved tfidf_dictionary
if background_dir != None:
tfidf_dictionary = tfidf.build_tfidf_model(background_dir, debug=args.debug, verbose=args.verbose, stem=(not args.nostem))
else:
        tfidf_dictionary = pickle.load(open('tfdict.pkl', 'rb'))
if tfidf_dictionary is None:
return None
# find suitable cutoff point if none given
file = open(input_doc + '.txt', 'r')
file_size = len(file.read())
if debug:
print("DEBUG: input file size is", file_size)
if max_terms is None:
max_terms = min(5, int(round(file_size / 1000)) + 1)
if debug:
print("DEBUG: Using at most", max_terms, "terms for TFIDF model.")
file.close()
input_dict = tfidf.build_tfidf_model(input_doc, file=True)
if input_dict is None:
return None
jargon_dict = {} # saved as dict for debugging
for word in input_dict:
if debug:
print("TRACE: input jargon found - " + word + " " + str(input_dict[word]))
if word in tfidf_dictionary:
jargon_dict[word] = tfidf_dictionary[word]
if debug:
print("--GOOD! Found a match.")
elif debug:
print("...No matching jargon in background; discarding.")
jargon_sorted = []
terms_remaining = int(max_terms)
if debug:
print("DEBUG: TFIDF scores for chosen jargon terms")
    for word, score in sorted(jargon_dict.items(), key=lambda kv: (kv[1], kv[0])):
if terms_remaining < 1:
break
if debug:
print(" ", word, " = ", score)
jargon_sorted.append(word)
terms_remaining -= 1
return jargon_sorted
# identifies a list of jargon terms using The Termolator
# saves results to output_termolator.txt
def find_jargon_using_termolator(input_doc, background_dir, output_doc):
# weird solution: run with -oldbg to generate .tchunk then without -oldbg to use .tchunk
# Make background dir into a .list file containing their names
open("background.list", 'w').close()
with open("background.list", 'a') as bg:
for dirpath, dirnames, filenames in os.walk(background_dir):
for file in filenames:
if file.endswith(".txt"):
bg.write(dirpath + "/" + file + "\n")
cmd = []
if not args.multi:
cmd.append("The_Termolator/run_termolator_with_1_file_foreground.sh") # program
else:
cmd.append("The_Termolator/run_termolator.sh") # program
cmd.append(args.input) # foreground
cmd.append("background.list") # background
cmd.append(".txt") # extension
cmd.append(args.output) # output name
cmd.append(str(not args.oldbg)) # if true, process background
cmd.append(str(args.internet)) # use internet for relevance scoring
cmd.append(str(args.allterms)) # considered terms
cmd.append(str(args.maxterms)) # accepted terms
cmd.append("The_Termolator") # directory of Termolator
cmd.append("False") # "additional topic string", should always be false
cmd.append("False") # if true skip preprocess foreground # = args.oldfg?
cmd.append(str(args.input)) # ??? webscore
cmd.append("False") # if false, use ranking.pkl
print("INFO: executing command: " + ' '.join(cmd))
call(cmd)
termolator_jargon_terms = []
# get files from generated terms file
file_name = output_doc + ".out_term_list"
with open(file_name, 'r') as file:
lines = file.readlines()
for line in lines:
words = line.split()
termolator_jargon_terms.append(words[0])
return termolator_jargon_terms
def main():
# init sys args
print("Successfully loaded.")
input_doc = args.input
output_doc = args.output
read_dir = args.read
background_dir = args.bg
skip_read = args.noread
skip_term = args.noterm
skip_tfidf = args.notfidf
errors = False
# -----------------------------------
# Build Models
# -----------------------------------
if read_dir != "" and read_dir != None and not skip_read:
model = build_readability_model()
elif not skip_read:
# try to read from file: readability_model_weights.csv
# = readability model saved when program last run
# else if no file found, give warning that build must happen first
try:
model = joblib.load("readability_model.pkl", 'r')
except Exception as e:
print("ERROR: No readability model found on file. Please build readability model before continuing.")
            if args.debug:
print("DEBUG: " + str(e))
errors = True
return errors
if not skip_read:
# future work? include topic density in the readability model?
# calculate readability of document
if not args.multi:
input_as_feature_vec = readability.feature_extraction([input_doc + ".txt"], verbose=args.verbose)
prediction = readability.predict(model, input_as_feature_vec)
print("Readability prediction:" + "\n" + str(prediction))
open("read_output_" + input_doc + ".txt", 'w').close()
with open("read_output_" + input_doc + ".txt", 'a') as file:
file.write(str(prediction))
else:
for dirpath, dirnames, filenames in os.walk(input_doc):
for file in filenames:
if file.endswith(".txt"):
full_name = os.path.join(dirpath, file)
out_file = os.path.join(dirpath, "read_output_" + file)
input_as_feature_vec = readability.feature_extraction([full_name], verbose=args.verbose)
prediction = readability.predict(model, input_as_feature_vec)
print("Readability prediction:" + "\n" + str(prediction))
open(out_file, 'w').close()
with open(out_file, 'a') as file:
file.write(str(prediction))
# -----------------------------------
# Find Jargon
# -----------------------------------
if not skip_tfidf:
tfidf_jargon_terms = find_jargon_using_tfidf()
open("tfidf_output_" + input_doc + ".txt", 'w').close()
with open("tfidf_output_" + input_doc + ".txt", 'a') as file:
for j in tfidf_jargon_terms:
file.write(str(j) + "\n")
if background_dir:
if not skip_term:
termolator_jargon_terms = find_jargon_using_termolator(input_doc, background_dir, output_doc)
# TODO context injection using the termolator's list of jargon
# TODO save as [input_doc]_injected.txt
#if not skip read
# input_as_feature_vec = readability.feature_extraction([input_doc + "_injected.txt"], verbose=verbose)
# prediction = readability.predict(read_model, input_as_feature_vec)
# print("New readability with context injected: " + str(prediction))
if not skip_tfidf and not skip_term:
open("overlapping_jargon_" + input_doc + ".txt", 'w').close()
with open("overlapping_jargon_" + input_doc + ".txt", 'a') as file:
if not (skip_tfidf or skip_term):
for tf_jargon in tfidf_jargon_terms:
for term_jargon in termolator_jargon_terms:
                        # treat the (possibly stemmed) TFIDF term as a prefix of the Termolator term
                        match = len(tf_jargon) <= len(term_jargon)
                        for ch in range(min(len(tf_jargon), len(term_jargon))):
                            if tf_jargon[ch] != term_jargon[ch]:
                                match = False
if match:
file.write(term_jargon + "\n")
return errors
if __name__ == "__main__":
import argparse
prog_desc = "Input and output files assumed to be .txt, but do not include file extension." + \
"Reads input from input file and analyzes jargon.\n" + \
"Outputs jargon-identified text using TFIDF to output_tfidf.\n" + \
"Outputs jargon-identified text using The Termolator to output_termolator.\n"
epi = "Note that the directory of files for training the readability model is assumed to be split into several subdirectories." + \
"\nWhen building the model, you will be prompted to give a readability value for each subdirectory, assumed to be a category representing one" + \
"level of readability. The readability uses a scale of 1 (easy) to 100 (difficult).\n"
parser = argparse.ArgumentParser(description=prog_desc, epilog=epi)
parser.add_argument('-debug', action='store_true', help="Show debug info")
parser.add_argument('-verbose', action='store_true', help="Show verbose output")
parser.add_argument('-noread', action='store_true', help="Skip readability modeling")
parser.add_argument('-notfidf', action='store_true', help="Skip TFIDF modeling")
parser.add_argument('-noterm', action='store_true', help="Skip Termolator modeling")
parser.add_argument('-nostem', action='store_true', help="Skip word-stemming for TFIDF")
parser.add_argument('-maxterms', type=int, default=100, help="Max jargon terms accepted for Termolator and TFIDF")
parser.add_argument('-allterms', type=int, default=1000, help="Max jargon terms considered for Termolator")
parser.add_argument('-read', help="Directory for corpus that trains readability model. Can be left out if readability model was already built.")
parser.add_argument('-bg', help="Directory for corpus that trains TFIDF and Termolator models")
parser.add_argument('-oldbg', action='store_true', help="Background already processed (don't process again)")
parser.add_argument('-oldfg', action='store_true', help="Foreground already processed (don't process again)")
parser.add_argument('-multi', action='store_true', help="Input is a directory - finds jargon across all files")
parser.add_argument('-internet', action='store_true', help="Use web for improved scoring (slow)")
parser.add_argument('input', help="Input filename or directory")
parser.add_argument('output', help="Output filename")
args = parser.parse_args()
if args.multi and not (args.notfidf):
parser.error("Multiple input files not implemented for TFIDF. Please use -notfidf when using -multi.")
if not (args.noterm) and args.bg is None:
parser.error("Background directory required to run Termolator.")
# resolved later as needed
#if not args.noread and args.read is None:
# parser.error("Readability directory required to run readability model.")
if args.input.endswith(".txt") or args.output.endswith(".txt"):
parser.error("Input and output filenames should not include file extensions.")
print("\nLoading system operations...")
import sys, os
from sklearn.externals import joblib
from subprocess import call
print("\nLoading Context Injection...")
import readability
import tfidf
import define
errcode = main()
if not errcode:
print("Finished successfully.")
else:
print("Exited with errors.")
```
#### File: joshmiller17/context-injection/tfidf.py
```python
from __future__ import print_function
from __future__ import division
import nltk
import numpy as np
import os
import pickle
from readability import pre_process
from collections import defaultdict
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.stem.porter import PorterStemmer
from sklearn.externals import joblib
from sklearn.linear_model import LinearRegression
#import corenlp_pywrap as nlp
def files_to_texts(files):
texts = []
for i in range(len(files)):
file = files[i]
text = (open(file, 'r')).read()
texts.append(text)
return texts
# nltk stemmer
def get_stems(input):
try:
tokenized = nltk.word_tokenize(input)
except LookupError:
print("INFO: punkt not found, downloading now")
nltk.download("punkt")
tokenized = nltk.word_tokenize(input)
stems = []
for token in tokenized:
stems.append(stem(token))
return stems
def stem(token):
return PorterStemmer().stem(token)
def no_stem_tokenizer(input):
try:
tokenized = nltk.word_tokenize(input)
except LookupError:
print("INFO: punkt not found, downloading now")
nltk.download("punkt")
tokenized = nltk.word_tokenize(input)
return tokenized
# sklearn tfidf
def train_tfidf(docs, stem=True, verbose=False, save_results=False):
if len(docs) < 1:
print("ERROR: No documents to train TFIDF on")
return None
if verbose:
print("INFO: pre-processing " + str(len(docs)) + " documents.")
processed_docs = []
for doc in docs:
clean = pre_process(doc)
processed_docs.append(clean)
if verbose:
print("INFO: Training TFIDF model, stemming set to " + str(stem))
print("INFO: Training TFIDF Vectorizer... (this may take a long time)")
if stem:
tfidf = TfidfVectorizer(tokenizer=get_stems, stop_words='english')
else:
tfidf = TfidfVectorizer(tokenizer=no_stem_tokenizer, stop_words='english')
result = tfidf.fit_transform(processed_docs)
scores = zip(tfidf.get_feature_names(), np.asarray(result.sum(axis=0)).ravel())
tfdict = defaultdict(float)
for score in scores:
tfdict[score[0]] = score[1]
if save_results:
pickle.dump(tfdict, open("tfdict.pkl", "wb"))
return tfdict
def get_tfidf(tfdict, word):
    # tokenize the word, stem its first token, and look up the summed TF-IDF weight
    tokens = nltk.word_tokenize(word)
    stemmed = stem(tokens[0]) if tokens else word
    return tfdict[stemmed]
def count_coreferences(phrase, context):
# future work? count coreferences of phrase in context
raise NotImplementedError
def train_model(data, labels):
    model = LinearRegression(normalize=True)
model.fit(data, labels)
return model # can get weights from model.get_params
def predict(model, datum):
return model.predict(datum)
# future work? use joblib to save and load models
# input: directory of files
# output: tfidf dictionary
def build_tfidf_model(background_dir, file=False, debug=False, verbose=False, stem=True):
if debug:
print("DEBUG: Building TFIDF model, stem=" + str(stem))
files = []
if file:
files = [background_dir + ".txt"]
else:
for dirpath, dirnames, filenames in os.walk(background_dir):
for file in filenames:
files.append(os.path.join(dirpath, file))
if debug:
print(str(len(files)) + " files found in background " + background_dir)
if len(files) < 10:
for f in files:
print(f)
texts = files_to_texts(files)
if len(texts) < len(files):
print("ERROR: Some files were unable to be processed. " + len(texts) + " / " + len(files))
tfdict = train_tfidf(texts, verbose=verbose, stem=stem)
joblib.dump(tfdict, "tfidf_model.pkl")
return tfdict
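# Rough usage sketch (hypothetical path, not from this repository's tests):
#   build_tfidf_model("corpus_dir") walks corpus_dir, fits a TfidfVectorizer over
#   all files, and returns a defaultdict mapping each (optionally stemmed) term to
#   its summed TF-IDF weight, also persisted to tfidf_model.pkl via joblib.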
def main():
# hardcoded test data
files = ["practice_data/train_data_1.txt", "practice_data/train_data_2.txt", "practice_data/train_data_3.txt"]
texts = files_to_texts(files)
tfdict = train_tfidf(texts)
print("done")
if __name__ == "__main__":
main()
``` |
{
"source": "joshmiller17/foldit-view-data",
"score": 2
} |
#### File: joshmiller17/foldit-view-data/viewer_script.py
```python
from __future__ import division, print_function
from collections import defaultdict
from sklearn.cluster import AgglomerativeClustering
import operator
import csv
import numpy
import scipy
from scipy import stats
# Python 2.x deprecated
# try:input = raw_input
# except NameError: pass
# try: from StringIO import StringIO
# except ImportError: from io import StringIO
# try: from future.utils import iteritems
# except ModuleNotFoundError: pass
GROUP_FOLDER = "groupuser_stats"
ENTROPIES_FILE = "entropies.csv"
FREQUENCIES_FILE = "frequencies.csv"
EXPERTS_FILE = "experts.csv"
MIN_HIGHSCORES_PER_EXPERT = 2
MIN_SAMPLES_PER_GROUP = 100
UID_LENGTH = 33
PID_LENGTH = 7
"""
VIEW (conceptual struct):
Metadata
RPRP_PUZZLE_RANKS
type - 1=soloist, 2=evolver
pid - puzzle
uid - user
best_score - note: scores are in rosetta energy, so lower is better
cur_score
gid - group id
RPNODE__PUZZLE
nid - puzzle id
vid - version id
title
View Data
OPTIONS
(basically everything except maybe timestamp and error flag)
QUERY EXAMPLES
Count values of an option:
select advanced_mode, count(advanced_mode) from options group by advanced_mode
USEFUL CODE TIDBITS
Convert unicode back into normal string:
my_unicode_string.encode('ascii','ignore')
"""
FREQ_COUNT_QUERY = '''select %s, count(%s) from options group by %s;'''
PIDS_BY_CAT = {}
ENTROPY_DICT = {}
OPT_FREQ_DICT = {}
EXPERTS = []
META_CATEGORIES = ["Design", "Prediction", "Electron Density", "Hand-Folding"]
# For these options, there is a lot of missing data - replace missing with default value
MISSING_DEFAULTS = {
"puzzle_dialog__show_beginner": 0,
"rank_popups": 1,
"selection_mode": 0,
"selection_mode__show_notes": 1,
"tooltips": 1,
"view_options__guide_pulse": 1,
"view_options__show_backbone_issues": 0,
"view_options__show_residue_burial": 0,
"view_options__sym_chain_colors": 0,
}
BINARY_OPTIONS = [
"advanced_mode",
"electron_density_panel__backface_culling",
"music",
"puzzle_dialog__show_beginner",
"puzzle_dialog__show_old",
"rank_popups",
"selection_mode",
"selection_mode__show_notes",
"sound",
"switch_middle_right_click",
"switch_residue_colors",
"tooltips",
"view_options__dark_background",
"view_options__gui_fade",
"view_options__guide_pulse",
"view_options__relative_score_coloring",
"view_options__show_backbone_issues",
"view_options__show_bondable_atoms",
"view_options__show_buried_polars",
"view_options__show_clashes",
"view_options__show_contact_map_geoms",
"view_options__show_hbonds",
"view_options__show_helix_hbonds",
"view_options__show_issues",
"view_options__show_non_protein_hbonds",
"view_options__show_other_hbonds",
"view_options__show_outlines",
"view_options__show_residue_burial",
"view_options__show_sidechain_hbonds",
"view_options__show_sidechains_with_issues",
"view_options__show_voids",
"view_options__sym_chain_colors",
"view_options__working_pulse_style",
]
CAT_KEYS = ["view_options__current_visor", "view_options__render_style", "view_options__sidechain_mode"]
CAT_OPTIONS = {
"view_options__current_visor": ["AAColor", "AbegoColor", "CPK", "EnzDes", "Hydro", "Hydro/Score", "Hydro/Score+CPK", "Hydrophobic", "Ligand Specific", "Rainbow", "Score", "Score/Hydro", "Score/Hydro+CPK"],
"view_options__render_style": ["Cartoon", "Cartoon Ligand", "Cartoon Thin", "Line", "Line+H", "Line+polarH", "Sphere", "Stick", "Stick+H", "Stick+polarH", "Trace Line", "Trace Tube"],
"view_options__sidechain_mode": ["Don't Show (Fast)", "Show All (Slow)", "Show Stubs"]
}
ALL_USED_OPTIONS = [opt for opt in BINARY_OPTIONS]
for cat_opt in CAT_KEYS:
for opt in CAT_OPTIONS[cat_opt]:
ALL_USED_OPTIONS.append(opt)
TOTAL_DIMS = len(ALL_USED_OPTIONS)
FULL_OPTIONS_LIST = [
"advanced_mode",
"autoshow_chat__global",
"autoshow_chat__group",
"autoshow_chat__puzzle",
"autoshow_chat__veteran",
"autoshow_notifications",
"chat__auto_reconnect",
"chat__disable_non_group",
"chat__enable_public_profanity_filter",
"cleanup_temp_files",
"electron_density_panel__alpha",
"electron_density_panel__backface_culling",
"electron_density_panel__color",
"electron_density_panel__threshold",
"electron_density_panel__visualization",
"graph_options__graph_length_value",
"graph_options__graph_memory_value",
"gui__desired_fps",
"gui__desired_window_height",
"gui__desired_window_width",
"gui__file_dir",
"gui__image_dir",
"login_dialog__disable_timeouts",
"login_dialog__player",
"login_dialog__proxy",
"login_dialog__use_proxy",
"music",
"puzzle_dialog__show_beginner",
"puzzle_dialog__show_old",
"rank_popups",
"reduce_bandwidth",
"render__option__shader_outline",
"selection_mode",
"selection_mode__show_notes",
"sound",
"switch_middle_right_click",
"switch_residue_colors",
"tooltips",
"update_group", # several options, but probably split into =="main" true or false
"view_options__current_visor",
"view_options__dark_background",
"view_options__gui_fade",
"view_options__guide_pulse",
"view_options__relative_score_coloring",
"view_options__render_style",
"view_options__show_backbone_issues",
"view_options__show_bondable_atoms",
"view_options__show_buried_polars",
"view_options__show_clashes",
"view_options__show_contact_map_geoms",
"view_options__show_hbonds",
"view_options__show_helix_hbonds",
"view_options__show_issues",
"view_options__show_non_protein_hbonds",
"view_options__show_other_hbonds",
"view_options__show_outlines",
"view_options__show_residue_burial",
"view_options__show_sidechain_hbonds",
"view_options__show_sidechains_with_issues",
"view_options__show_voids",
"view_options__sidechain_mode",
"view_options__sym_chain_colors",
"view_options__sym_chain_visible", # not enough valid data to make use of
"view_options__working_pulse_style",
]
# --------------- TEST BED -------------------------
# place to run tests
def test(args):
print("Beginning Tests...")
#sse_plot(weighted=True)
#cluster_plot("", "dendro_all_unique_6.png", weighted=False) # only need to run once, can comment out after
# cluster mapping is now saved to clusters.csv
clusters = {} # view : cluster
with open("clusters.csv", 'r') as f:
reader = csv.reader(f)
next(reader) # skip header
for row in reader:
clusters[row[1]] = row[0]
#count_view_frequencies()
#count_view_freq_test()
#print(count_results("where is_expert = 1"))
#add_is_selected_novice_to_options(reselect=True)
chi_square_analysis(clusters)
#group_cluster_analysis(clusters)
print("Done.")
def count_view_freq_test():
with open("view_frequencies.csv", 'r') as f:
reader = csv.reader(f)
next(reader)
sum = 0
for row in reader:
if row == []:
continue
try:
sum += int(row[0])
except Exception as e:
print("row = " + str(row))
exit(1)
print("total views: " + str(sum))
def count_results(where):
c.execute('''select count(*) from options %s''' % where)
results = c.fetchall()
return results[0][0]
def chi_square_analysis(clusters):
print("CQA: getting all dists")
nonexpert_dist = sum_view_dists_by_user(clusters, query_to_views('''where is_selected_novice == 1'''))
print(nonexpert_dist)
expert_dist = sum_view_dists_by_user(clusters, query_to_views('''where is_expert == 1'''))
#hs_views = query_to_views('''where best_score_is_hs == 1 ''')
#hs_count = len(hs_views)
#hs_dist = sum_view_dists_by_user(clusters, hs_views)
#nonhs_dist = sum_view_dists_by_user(clusters, query_to_views('''where best_score_is_hs == 0 order by random() limit %d''' % hs_count))
cat_expert_dists = []
cat_nonexpert_dists = []
#cat_hs_dists = []
#cat_nonhs_dists = []
for cat in META_CATEGORIES:
print("CQA: getting all queries by " + str(cat))
cat_expert_dists.append(sum_view_dists_by_user(clusters, query_to_views('''where is_expert == 1 and instr(puzzle_cat, \"%s\")''' % cat)))
cat_nonexpert_dists.append(sum_view_dists_by_user(clusters, query_to_views('''where is_selected_novice == 1 and instr(puzzle_cat, \"%s\")''' % cat)))
print(cat_nonexpert_dists)
#hs_views = query_to_views('''where best_score_is_hs == 1 and instr(puzzle_cat, \"%s\")''' % cat)
#hs_count = len(hs_views)
#cat_hs_dists.append(sum_view_dists_by_user(clusters, hs_views))
#cat_nonhs_dists.append(sum_view_dists_by_user(clusters, query_to_views('''where best_score_is_hs == 0 and instr(puzzle_cat, \"%s\") order by random() limit %d''' % (cat,hs_count))))
null_expert = create_null_hypothesis_table(cat_expert_dists)
null_nonexpert = create_null_hypothesis_table(cat_nonexpert_dists)
chi_sq("expertise_bycat_transposed", cat_expert_dists, cat_nonexpert_dists, transpose=True)
chi_sq("catvsnull_expert_transposed", cat_expert_dists, null_expert, transpose=True)
chi_sq("catvsnull_nonexpert_transposed", cat_nonexpert_dists, null_nonexpert, transpose=True)
return
print("CQA: doing analysis")
chi_sq("expertise_main", expert_dist, nonexpert_dist)
chi_sq("expertise_bycat", cat_expert_dists, cat_nonexpert_dists)
chi_sq("expertise_main_cont", expert_dist, nonexpert_dist, contingency=True)
chi_sq("expertise_bycat_cont", cat_expert_dists, cat_nonexpert_dists, contingency=True)
#chi_sq("hs_main", hs_dist, nonhs_dist)
#chi_sq("hs_bycat", cat_hs_dists, cat_nonhs_dists)
print("CQA: vs null")
null_expert = create_null_hypothesis_table(cat_expert_dists)
null_nonexpert = create_null_hypothesis_table(cat_nonexpert_dists)
#null_hs = create_null_hypothesis_table(cat_hs_dists)
#null_nonhs = create_null_hypothesis_table(cat_nonhs_dists)
chi_sq("catvsnull_expert", cat_expert_dists, null_expert)
chi_sq("catvsnull_nonexpert", cat_nonexpert_dists, null_nonexpert)
chi_sq("catvsnull_expert_cont", cat_expert_dists, null_expert, contingency=True)
chi_sq("catvsnull_nonexpert_cont", cat_nonexpert_dists, null_nonexpert, contingency=True)
#chi_sq("catvsnull_hs", cat_hs_dists, null_hs)
#chi_sq("catvsnull_nonhs", cat_nonhs_dists, null_nonhs)
# input: a num_categories x num_clusters table of view distributions
# output: what that table would look like if num_categories didn't affect distribution
def create_null_hypothesis_table(table):
new_table = [row[:] for row in table] # copy
# get the scales, sum of each row and column
SR = [sum(table[row]) for row in range(len(table))]
SC = [sum(x) for x in zip(*table)]
scale = sum(SC)
# normalize the columns
for col in range(len(new_table[0])): # table[row][col]
for row in range(len(new_table)):
new_table[row][col] = SC[col] * SR[row] / scale
return new_table
# no extension
def chi_sq(filename, table1, table2, contingency=False, transpose=False):
import pickle
pickle.dump(table1, open(filename + "1.p", 'wb'))
pickle.dump(table2, open(filename + "2.p", 'wb'))
with open(filename + '.txt', 'w') as f:
if contingency:
chi_sq, p, dof, expected = scipy.stats.chi2_contingency(table1, correction=True) # Yates correction
else:
if transpose:
chi_sq, p = stats.chisquare(table1, table2, axis=1)
else:
chi_sq, p = stats.chisquare(table1, table2)
f.write("X^2=" + str(chi_sq))
f.write("\np=" + str(p))
if contingency:
f.write("\ndof=" + str(dof))
f.write("\nObserved\n")
for t in table1:
f.write('\n')
f.write(str(t))
f.write("\nExpected\n")
for t in table2:
f.write('\n')
f.write(str(t))
def sum_view_dists_by_group(cluster_mapping, views, stats=True):
counter = [0, len(views)]
current_group = ""
group_views = {}
exp_count = [0, 0]
dists = {} # gid : dist
experts = {} # gid : [total_users, experts]
for key in sorted(views):
counter[0] += 1
print('\r' + str(counter[0]) + '/' + str(counter[1]), end='', flush=True)
gid = key.split('/')[0]
if current_group == "": # handle first group
current_group = str(gid)
if current_group == str(gid): # still on same group, append
group_views[key] = views[key]
else: # switch group, flush out and start over
if group_views != {}:
view_distribution, exp_stats = sum_view_dists_by_user(cluster_mapping, group_views, stats=True, square=True)
dists[current_group] = view_distribution
experts[current_group] = exp_stats
current_group = str(gid)
group_views = {}
group_views[key] = views[key]
# finally
if group_views != {}:
view_distribution, exp_stats = sum_view_dists_by_user(cluster_mapping, group_views, stats=True, square=True)
dists[current_group] = view_distribution
experts[current_group] = exp_stats
print("")
return dists, experts
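# Walk the views sorted by key, build one normalized cluster distribution per user, and sum them
# If square is set, each user's distribution is squared first so that specialized users stand out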
def sum_view_dists_by_user(cluster_mapping, views, stats=False, square=False):
counter = [0, len(views)]
current_user = ""
users_views = {}
dist = [0.0] * 6
exp_stats = [0, 0] # total_users, experts
for key in sorted(views):
counter[0] += 1
print('\r' + str(counter[0]) + '/' + str(counter[1]), end='', flush=True)
if current_user == "": # handle first user
current_user = key_to_uid(key)
if current_user == key_to_uid(key): # still on same user, append
users_views[key] = views[key]
else: # switch users, flush out and start over
if users_views != {}:
view_distribution = views_to_normalized_cluster_distribution(users_views, cluster_mapping)
if current_user in EXPERTS:
exp_stats[1] += 1
exp_stats[0] += 1
if square:
view_distribution = [x**2 for x in view_distribution] # NEW square the distribution so specializations stand out
dist = [sum(x) for x in zip(view_distribution, dist)] # add
current_user = key_to_uid(key)
users_views = {}
users_views[key] = views[key]
# finally
view_distribution = views_to_normalized_cluster_distribution(users_views, cluster_mapping)
if square:
view_distribution = [x**2 for x in view_distribution] # NEW square the distribution so specializations stand out
dist = [sum(x) for x in zip(view_distribution, dist)] # add
if current_user in EXPERTS:
exp_stats[1] += 1
exp_stats[0] += 1
if stats:
return dist, exp_stats
return dist
# ------------------------------------------------------------
def views_to_normalized_cluster_distribution(views, cluster_mapping, num_clusters=6):
view_distribution = [0.0] * num_clusters
if len(views) == 0: # short-circuit for no views
return view_distribution
for (id,view) in views.items():
list = view_dict_to_list(view)
list_clean(list)
view_string = view_list_to_string(list)
cluster = cluster_mapping[view_string]
view_distribution[int(cluster)] += 1
# normalize
scale = numpy.sum(view_distribution)
for i in range(len(view_distribution)):
view_distribution[i] /= scale
return view_distribution
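# Per-group cluster analysis: compute each group's summed view distribution, expert ratio,
# and Shannon diversity index, and write the results to group_shannons.csv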
def group_cluster_analysis(cluster_mapping, num_clusters=6):
gids = get_valid_gids()
groups = []
counter = [0, len(gids)]
shannons = []
valid_groups = 0
shannon_file = "group_shannons.csv"
views = query_to_views("")
dists, expert_counts = sum_view_dists_by_group(cluster_mapping, views)
for gid in gids:
if str(gid) not in dists.keys():
print("missed group: " + str(gid))
with open(shannon_file, 'w') as gs_file:
writer = csv.writer(gs_file)
writer.writerow(["gid", "total_users", "experts", "percent_experts", "shannon"])
for gid in sorted(dists):
gid = str(gid)
if expert_counts[gid][0] < 2: # at least 2 users
continue
group_dist = dists[gid]
if numpy.mean(group_dist) > 0.1: # avoid empty groups of all 0s
print("\nGroup " + str(gid) + " (" + str(expert_counts[gid][1]) + "/" + str(expert_counts[gid][0]) + ")")
print("Group freq dist: " + str(group_dist))
shan = shannon(group_dist)
print("Shannon index: " + str(shan))
if shan != "nan":
if gid == "0":
total_users = expert_counts[gid][0]
experts = expert_counts[gid][1]
percent_experts = experts * 100.0 / total_users
writer.writerow([gid, total_users, experts, percent_experts, shan])
continue
shannons.append(shan)
valid_groups += 1
total_users = expert_counts[gid][0]
experts = expert_counts[gid][1]
percent_experts = experts * 100.0 / total_users
writer.writerow([gid, total_users, experts, percent_experts, shan])
else:
print("\nInvalid shannon: " + str(gid) + "\n")
else:
print("\nEmpty group: " + str(gid) + "\n")
shannons_mean = numpy.mean(shannons)
shannons_std = numpy.std(shannons)
print("\nAverage group Shannon index: " + str(shannons_mean))
print("Std Dev group Shannon index: " + str(shannons_std))
print("Valid groups: " + str(valid_groups))
# ------------------------------------------- END ANALYSIS -------------------------------------------
# ----------------------------------------------------------------------------------------------------
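# Rebuild the view -> cluster assignment from clusters.csv and print the centroid of each cluster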
def get_cluster_centroids():
clusters = {} # view : cluster
with open("clusters.csv", 'r') as f:
reader = csv.reader(f)
next(reader) # skip header
for row in reader:
clusters[row[1]] = row[0]
cl = []
for i in range(6):
cl.append([])
for (view,num) in clusters.items():
cl[int(num)].append(view) # cluster ids are read from csv as strings
print("clusters built")
for i, cluster in enumerate(cl):
print("Centroid for cluster " + str(i) + " is " + str(centroid(cluster)))
def key_to_uid(key):
splitter = key.split('/') # remove gid
uidplus = splitter[1]
return uidplus[:UID_LENGTH]
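# Print each group's expert count and expert percentage, sorted by percentage and then group size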
def test_group_stats():
gids = get_valid_gids()
groups = []
for gid in gids:
if gid == 0: # user not in a group
continue
c.execute('''select distinct uid from rprp_puzzle_ranks where is_expert == 0 and gid == \"%s\"; ''' % gid)
num_novices = len([result[0] for result in c.fetchall()])
c.execute('''select distinct uid from rprp_puzzle_ranks where is_expert == 1 and gid == \"%s\"; ''' % gid)
num_experts = len([result[0] for result in c.fetchall()])
num_users = num_novices + num_experts
if num_users < 2: # remove small groups
continue
g = {}
g["id"] = gid
g["experts"] = num_experts
g["total"] = num_users
g["percent"] = num_experts / num_users
groups.append(g)
sorted_groups = multikeysort(groups, ["-percent", "-total"])
for gr in sorted_groups:
percent = '{:.1%}'.format(gr["percent"])
print("Group " + str(gr["id"]) + ": " + str(gr["experts"]) + "/" + str(gr["total"]) + " experts (" + percent + ")")
def count_view_popularity(data, file):
dataset = defaultdict(int)
for view in data:
key = ""
for ele in view:
if ele == 1 or ele == 0: # compare by value, not identity
key += str(ele)
dataset[key] += 1
sorted_dict = sorted(dataset.items(), key=operator.itemgetter(1), reverse=True)
with open(file, 'w') as f:
writer = csv.writer(f)
header = ["count"]
for opt in ALL_USED_OPTIONS:
header.append(opt)
writer.writerow(header)
for key, value in sorted_dict:
row = [value]
for i in range(len(key)):
row.append(key[i])
writer.writerow(row)
def list_to_set(data):
dataset = set([])
for d in data:
dataset.add(tuple(d))
return dataset
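# Scree/elbow helper: plot within-cluster SSE for 1..max clusters over the unique (optionally frequency-weighted) views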
def sse_plot(weighted=True, max=15):
plt.figure(figsize=(10,7))
views = query_to_views("")
data = []
for (id,view) in views.items():
if weighted:
weighted_view = apply_inverse_frequency_weighting(view)
data.append((view_dict_to_list(weighted_view)))
else:
data.append((view_dict_to_list(view)))
unicode_clean(data)
data = list_to_set(data) # convert to set to remove duplicates
data = list(data) # convert back because we need as list
graph_sses(data, max=max)
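# Draw a Ward-linkage dendrogram of the unique views matching the where clause, save it to filename,
# and print per-cluster centroid statistics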
def cluster_plot(where, filename, n_clusters=6, weighted=False):
plt.figure(figsize=(10,7))
views = query_to_views(where)
data = []
# apply the optional inverse-frequency weighting before flattening each view dict
for (id,view) in views.items():
if weighted:
view = apply_inverse_frequency_weighting(view)
data.append(view_dict_to_list(view))
unicode_clean(data)
if args.debug:
print("Total views: " + str(len(data)))
data = list_to_set(data) # convert to set to remove duplicates
if args.debug:
print("Unique views: " + str(len(data)))
data = list(data) # convert back because we need as list
dend = shc.dendrogram(shc.linkage(data, method='ward'))
plt.savefig(filename)
clusters_to_stats(data, num_clusters=n_clusters)
def get_sse(cluster):
cent = centroid(cluster)
sse = 0
for view in cluster:
for dim in range(len(view)):
sse += abs(view[dim] - cent[dim]) ** 2
return sse
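# Compute the average within-cluster SSE for 1..max agglomerative clusterings and save the scree plot to cluster_scree.png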
def graph_sses(data, max=3):
print("INFO: Beginning graph of SSE by clusters for up to " + str(max) + " clusters")
sses = []
cluster_counts = []
for i in range(1, max+1):
print("INFO: Calculating SSE for " + str(i) + " cluster(s)")
data_buckets = clusters_to_buckets(data, num_clusters=i)
sse_avg = 0
for key in data_buckets.keys():
print("---Cluster " + str(key+1))
sse_avg += get_sse(data_buckets[key])
sse_avg /= i
sses.append(sse_avg)
cluster_counts.append(i)
plt.plot(cluster_counts, sses, linewidth=2)
plt.savefig("cluster_scree.png")
def clusters_to_buckets(data, num_clusters=6):
cluster = AgglomerativeClustering(n_clusters=num_clusters, affinity='euclidean', linkage='ward')
cluster.fit_predict(data)
data_buckets = {}
for j in range(num_clusters):
data_buckets[j] = []
cluster_labels = cluster.labels_
for i in range(len(cluster_labels)):
data_buckets[cluster_labels[i]].append(data[i])
return data_buckets
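# Cluster the views with Ward agglomerative clustering, print centroid stats per cluster,
# and record the resulting view -> cluster assignment in clusters.csv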
def clusters_to_stats(data, num_clusters=6):
cluster = AgglomerativeClustering(n_clusters=num_clusters, affinity='euclidean', linkage='ward')
cluster.fit_predict(data)
data_buckets = {}
for j in range(num_clusters):
data_buckets[j] = []
cluster_labels = cluster.labels_
for i in range(len(cluster_labels)):
data_buckets[cluster_labels[i]].append(data[i])
for c in range(len(data_buckets.keys())):
print("Analyzing cluster " + str(c))
centroid_stats(cluster=data_buckets[c])
with open("clusters.csv", 'w') as c_file:
writer = csv.writer(c_file)
writer.writerow(["cluster_num", "view"])
for num, views in data_buckets.items():
for view in views:
view_str = view_list_to_string(view)
writer.writerow([num, view_str])
def view_list_to_string(view):
view_str = ''
for v in view:
if v == 0 or v == 1:
view_str += str(v)
return view_str
# prints out number of missing entries for each option
# reads 2000 entries at a time
def count_missing():
rows_counted = 0
missing_dict = {}
list_of_options = []
for o in BINARY_OPTIONS:
missing_dict[o] = 0
list_of_options.append(o)
for cat in CAT_OPTIONS:
for o in CAT_OPTIONS[cat]:
missing_dict[o] = 0
list_of_options.append(o)
views = query_to_views("limit 2000 offset " + str(rows_counted)) # whole db, iteratively
while views: # query_to_views returns an empty dict once the offset runs past the table
ll = []
for (id,view) in views.items():
ll.append(view_dict_to_list(view))
for l in ll:
for i in range(len(list_of_options)):
if l[i] is None or l[i] == "None":
missing_dict[list_of_options[i]] += 1
if rows_counted % 2000 == 0 and l is ll[0]:
if i == 0:
print("\nRows counted = " + str(rows_counted))
print(list_of_options[i] + ": " + str(missing_dict[list_of_options[i]]))
if i >= len(list_of_options)-1:
print("\nRows counted = " + str(rows_counted))
rows_counted += 2000
views = query_to_views("limit 2000 offset " + str(rows_counted)) # whole db, iteratively
# Report stats for a centroid described by a where query
def centroid_stats(where="", cluster=None, name=""):
if cluster == []: # given empty cluster
return
# Get total centroid
if cluster is None:
views = query_to_views(where)
cluster = []
for (id,view) in views.items():
cluster.append(view_dict_to_list(view))
print("Analyzing this centroid: " + where)
unicode_clean(cluster)
data_count = str(len(cluster))
print("Density (" + data_count + "):")
d = density(cluster)
print(d)
print("Centroid (" + data_count + "):")
c = centroid(cluster)
print(c)
if name == "":
name = where
if len(c) < 1:
print("WARN: empty centroid")
return
with open(name + '.csv', 'w') as c_file:
writer = csv.writer(c_file)
writer.writerow(["dim", "M", "std"])
dimensions = [opt for opt in BINARY_OPTIONS]
for cat_opt in CAT_KEYS:
for opt in CAT_OPTIONS[cat_opt]:
dimensions.append(opt)
for i in range(len(dimensions)):
writer.writerow([dimensions[i], c[i], d[i]])
def print_experiment_details():
from datetime import datetime as dt
# print start and end dates of the experiment
c.execute('''select min(time), max(time) from options;''')
result = c.fetchall()[0]
start = dt.utcfromtimestamp(result[0])
end = dt.utcfromtimestamp(result[1])
print("experiment start datetime: " + str(start))
print("experiment end datetime: " + str(end))
# num unique users
c.execute('''select count(distinct(uid)) from options;''')
result = c.fetchall()[0][0]
num_unique_users = result
print("num unique users: " + str(num_unique_users))
# num unique puzzles
c.execute('''select count(distinct(pid)) from options;''')
results = c.fetchall()
num_unique_puzzles = results[0][0]
print("num unique puzzles: " + str(num_unique_puzzles))
# num unique puzzles per category
valid_puzzle_cats = get_valid_puzzle_categories()
print("num unique puzzles per category")
for cat in valid_puzzle_cats:
search_cat = cat
c.execute('''select count(distinct(pid)) from options where instr(puzzle_cat, \"%s\")''' % search_cat)
category_count = c.fetchall()[0][0]
print('''%s : %d''' % (cat, category_count))
# num total samples before filtering
if os.path.isfile('folditx.db'):
temp_conn = sqlite3.connect('folditx.db')
temp_c = temp_conn.cursor()
temp_c.execute('''select count(*) from options;''')
results = temp_c.fetchall()
total_data_samples_before_filtering = results[0][0]
print("total data samples (pre-filter): " + str(total_data_samples_before_filtering))
else:
print("ERR: Could not find database with name folditx.db")
# num total data samples
c.execute('''select count(*) from options;''')
result = c.fetchall()[0][0]
total_data_samples_after_filtering = result
print("total data samples (post-filter): " + str(total_data_samples_after_filtering))
# mean/std dev of options samples per user
c.execute('''select count(uid) from options group by uid;''')
results = c.fetchall()
num_samples_per_user = [result[0] for result in results]
mean_spu = round(calculate_mean(num_samples_per_user), 2)
stddev_spu = round(calculate_stddev(num_samples_per_user, mean_spu), 2)
print("mean of options samples per user: " + str(mean_spu))
print("std dev of options samples per user: " + str(stddev_spu))
return
def calculate_mean(data):
return (sum(data) * 1.0) / len(data)
def calculate_variance(data, mean):
return sum([(x - mean)**2 for x in data]) / (len(data) - 1)
def calculate_stddev(data, mean):
variance = calculate_variance(data, mean)
return variance**0.5
# ------------ END TEST BED -----------------------
# Sort a list of dicts by multiple keys; prefix a column name with '-' for descending order
def multikeysort(items, columns):
from operator import itemgetter
from functools import cmp_to_key
comparers = [((itemgetter(col[1:].strip()), -1) if col.startswith('-') else
(itemgetter(col.strip()), 1)) for col in columns]
def comparer(left, right):
for fn, mult in comparers:
a, b = fn(left), fn(right)
if a != b:
return mult * ((a > b) - (a < b))
return 0 # all keys tied
return sorted(items, key=cmp_to_key(comparer))
# ------------ ONE TIME FUNCTIONS -----------------
def get_valid_puzzle_categories():
c.execute('''select puzzle_cat, count(puzzle_cat) from options group by puzzle_cat;''')
puzzle_category_results = c.fetchall()
puzzle_categories = defaultdict(int)
for result in puzzle_category_results:
if result[1] > 0:
cats = result[0].split(', ')
for cat in cats:
puzzle_categories[cat] += result[1]
if args.debug:
print("DEBUG: puzzle category sample count:")
print(puzzle_categories)
return puzzle_categories.keys()
def get_valid_gids():
c.execute('''select gid, count(gid) from rprp_puzzle_ranks group by gid;''')
group_results = c.fetchall()
groups = []
for result in group_results:
if result[1] > MIN_SAMPLES_PER_GROUP:
groups.append(result[0])
return groups
def highscore_similarities(puzzle_categories):
# Cluster by high score / not, report clustering statistics
print("Calculating high score similarities")
all_highscores = []
for cat in puzzle_categories:
search_cat = cat
c.execute('''select uid, pid from rprp_puzzle_ranks where best_score_is_hs = 1 and instr(puzzle_cat, \"%s\"); ''' % search_cat)
highscore_results = c.fetchall()
print("\nINFO: " + str(len(highscore_results)) + " high score results for " + str(cat) + "\n")
highscores_in_cat = []
for uid, pid in highscore_results:
print('.',end='')
sys.stdout.flush()
views_per_user_per_cat = query_to_views('''where uid = \"%s\" and pid = %d ''' % (uid, pid))
for idkey, view in views_per_user_per_cat.items():
highscores_in_cat.append(view_dict_to_list(view))
centroid_name = "highscore_" + str(cat)
centroid_stats(cluster=highscores_in_cat, name=centroid_name)
all_highscores += highscores_in_cat
centroid_name = "all_highscores"
centroid_stats(cluster=all_highscores, name=centroid_name)
def group_similarities(gids, puzzle_categories):
print("Calculating group and player similarities")
gid_counter = [0, len(gids)]
for gid in gids:
if gid == 0: # user not in a group
continue
c.execute('''select distinct uid from rprp_puzzle_ranks where gid == \"%s\"; ''' % gid)
user_results = c.fetchall()
users = [result[0] for result in user_results]
print("\n-------------------------")
gid_counter[0] += 1
print("INFO: Processing group " + str(gid_counter[0]) + " / " + str(gid_counter[1]))
print("INFO: group " + str(gid) + " has " + str(len(users)) + " users")
print("-------------------------\n")
lists_per_group = []
lists_per_group_per_cat = {}
for user in users:
lists_per_user = []
for cat in puzzle_categories:
print(".", end='')
sys.stdout.flush()
if cat not in lists_per_group_per_cat:
lists_per_group_per_cat[cat] = []
views_per_user_per_cat = query_to_views('''where uid = \"%s\" and instr(puzzle_cat,\"%s\") ''' % (user, cat))
lists_per_user_per_cat = []
for idkey, view in views_per_user_per_cat.items():
lists_per_user_per_cat.append(view_dict_to_list(view))
if lists_per_user_per_cat != []:
print('\n')
centroid_name = "u_" + str(user) + "_c_" + str(cat) + "_count_" + str(len(lists_per_user_per_cat))
centroid_stats(cluster=lists_per_user_per_cat, name=os.path.join(GROUP_FOLDER, centroid_name))
lists_per_user += lists_per_user_per_cat
lists_per_group_per_cat[cat] += lists_per_user_per_cat
centroid_name = "u_" + str(user) + "_count_" + str(len(lists_per_user))
centroid_stats(cluster=lists_per_user, name=os.path.join(GROUP_FOLDER, centroid_name))
lists_per_group += lists_per_user
centroid_name = "g_" + str(gid) + "_count_" + str(len(lists_per_group))
centroid_stats(cluster=lists_per_group, name=os.path.join(GROUP_FOLDER, centroid_name))
for cat in puzzle_categories:
centroid_name = "g_" + str(gid) + "_c_" + str(cat) + "_count_" + str(len(lists_per_group_per_cat[cat]))
centroid_stats(cluster=lists_per_group_per_cat[cat], name=os.path.join(GROUP_FOLDER, centroid_name))
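# Average the per-dimension standard deviations across the per-group (or per-user) centroid csv files,
# weighting each file by its sample count, and write the result to group/user_similarities.csv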
def incremental_similarity_averages(files_and_counts, user=False):
# average the standard deviations across all files
inc_avgs = [0] * TOTAL_DIMS
inc_weight = 0
for name, count in files_and_counts.items():
prefix = "g_"
if user:
prefix = "u_"
csvfile = prefix + str(name) + "_count_" + str(count) + ".csv"
fullpath = os.path.join(GROUP_FOLDER, csvfile)
sum = [x * inc_weight for x in inc_avgs] # list, not a lazy map(), so it can be indexed below
with open(fullpath, 'r') as f:
reader = csv.reader(f)
next(reader) # skip header
i = 0
for row in reader:
sum[i] += (count * float(row[2])) # std
i += 1
if i != len(sum): # assert
print("ERR: assertion failed, file was " + str(i) + " rows long, there are " + str(len(sum)) + " total dimensions.")
exit(1)
inc_weight += count
for j in range(len(sum)):
sum[j] /= (inc_weight)
inc_avgs = sum
prefix = "group"
if user:
prefix = "user"
with open(prefix + '_similarities.csv', 'w') as g:
writer = csv.writer(g)
writer.writerow(['dim','average_std'])
for i in range(len(inc_avgs)):
writer.writerow([ALL_USED_OPTIONS[i], inc_avgs[i]])
def incremental_similarity_averages_by_cat(files_and_counts, user=False):
for metacat in META_CATEGORIES:
# average the standard deviations across all files
inc_avgs = [0] * TOTAL_DIMS
inc_weight = 0
for name, count in files_and_counts.items():
# NOTE: count is wrong for every cat, use glob to find file
prefix = "g_"
if user:
prefix = "u_"
csv_prefix = prefix + str(name) + "_c_" + metacat + "_count_"
fullpath = os.path.join(GROUP_FOLDER, csv_prefix)
filename = ''
for file in glob.glob(fullpath + '*.csv'):
filename = file
sum = [x * inc_weight for x in inc_avgs] # list, not a lazy map(), so it can be indexed below
with open(filename, 'r') as f:
reader = csv.reader(f)
next(reader) # skip header
i = 0
for row in reader:
sum[i] += (count * float(row[2])) # std
i += 1
if i != len(sum): # assert
print("ERR: assertion failed, file was " + str(i) + " rows long, there are " + str(len(sum)) + " total dimensions.")
exit(1)
inc_weight += count
for j in range(len(sum)):
sum[j] /= (inc_weight)
inc_avgs = sum
prefix = "group"
if user:
prefix = "user"
with open(prefix + '_' + metacat + '_similarities.csv', 'w') as g:
writer = csv.writer(g)
writer.writerow(['dim','average_std'])
for i in range(len(inc_avgs)):
writer.writerow([ALL_USED_OPTIONS[i], inc_avgs[i]])
def groupuser_analysis():
groups_and_counts = {}
users_and_counts = {}
for root, dir, files in os.walk("."):
for file in files:
if file.endswith(".csv"):
if file.startswith("g_"):
if "_c_" not in file: # category
m = re.search('_count_(\d*)\.csv$', file)
count = int(m.group(1))
m = re.search('^g_(\d*)_', file)
gid = m.group(1)
groups_and_counts[gid] = count
elif file.startswith("u_"):
if "_c_" not in file: # category
m = re.search('_count_(\d*)\.csv$', file)
count = int(m.group(1))
m = re.search('^u_([a-zA-Z0-9]*)_', file)
uid = m.group(1)
users_and_counts[uid] = count
incremental_similarity_averages_by_cat(groups_and_counts)
incremental_similarity_averages_by_cat(users_and_counts, user=True)
incremental_similarity_averages(groups_and_counts)
incremental_similarity_averages(users_and_counts, user=True)
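# Write view-frequency csv files for the full dataset and for each metacategory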
def count_view_frequencies():
views = query_to_views("")
data = []
for (id,view) in views.items():
data.append((view_dict_to_list(view)))
unicode_clean(data)
count_view_popularity(data, "view_frequencies.csv")
for metacat in META_CATEGORIES:
views = query_to_views("where instr(puzzle_cat,\"" + metacat + "\")")
data = []
for (id,view) in views.items():
data.append((view_dict_to_list(view)))
unicode_clean(data)
count_view_popularity(data, metacat + "_view_frequencies.csv")
# Calculate and print full report of interesting stats
def main_stats():
global c
print("INFO: Beginning main stats tests")
"""
MAIN STATS
Metacategories (META_CATEGORIES):
1. Design
2. Prediction
3. Electron Density (subcategory of Prediction)
4. Hand Folding (subcategory of Prediction)
Overall and per-metacategory: Experts vs Novices
Groups/Users
1. How much variance is there within a group?
a. Overall and per-metacategory
2. How much variance is there within a user?
a. Overall and per-metacategory
Clustering
1. Run clustering algorithm, described
2. Describe the centroids of each cluster, interpret
3. In what clusters do the most modal views fall under?
a. Overall and per-metacategory
4. Group/user details
a. Within each group/user, how many samples fall in each cluster?
b. How many clusters does the group spread out over?
Final questions:
1. What are the popular settings?
2. What does this mean beyond Foldit?
"""
fast = True # change to false if running for the first time
if not fast:
print("INFO: Expertise analysis")
# Overall and per-metacategory Experts vs Novices
for mc in META_CATEGORIES:
search_mc = mc
centroid_stats(where='''where is_expert == 0 and instr(puzzle_cat, \"%s\")''' % search_mc, name=mc + "Novice")
centroid_stats(where='''where is_expert == 1 and instr(puzzle_cat, \"%s\")''' % search_mc, name=mc + "Expert")
centroid_stats(where="where is_expert == 0", name="OverallNovice")
centroid_stats(where="where is_expert == 1", name="OverallExpert")
print("INFO: Expertise analysis")
# Overall and per-metacategory Experts vs Novices
centroid_stats(where="where is_expert == 0", name="OverallNovice")
centroid_stats(where="where is_expert == 1", name="OverallExpert")
for mc in META_CATEGORIES:
centroid_stats(where="where is_expert == 0 and instr(puzzle_cat,\"" + mc + "\")", name= mc + "Novice")
centroid_stats(where="where is_expert == 1 and instr(puzzle_cat,\"" + mc + "\")", name= mc + "Expert")
# Groups/Users
print("INFO: Loading group and puzzle category data")
gids = get_valid_gids()
puzzle_categories = get_valid_puzzle_categories()
print("INFO: Highscore analysis")
highscore_similarities(puzzle_categories)
print("INFO: Group analysis")
group_similarities(gids, puzzle_categories)
print("INFO: User analysis")
groupuser_analysis()
# Clustering
cluster_plot("", "dendro_all_unique.png")
views_to_cnum = {} # map view : cluster num, e.g. 110001101 : 3
with open("clusters.csv", 'r') as c_file:
reader = csv.reader(c_file)
next(reader)
for row in reader:
views_to_cnum[row[1]] = row[0]
fast2 = False
if not fast2:
# In what clusters do the most modal views fall under? Overall / per-metacategory
count_view_frequencies()
cluster_counts = defaultdict(int)
with open("view_frequencies.csv", 'r') as f:
reader = csv.reader(f)
next(reader)
for row in reader:
view_str = ""
for r in row[1:]:
if r == "0" or r == "1": # csv fields are strings
view_str += str(r)
if view_str == "":
continue
cluster_num = views_to_cnum[view_str]
cluster_counts[cluster_num] += int(row[0])
with open('cluster_counts.csv', 'w') as cc:
writer = csv.writer(cc)
writer.writerow(["cluster", "freq"])
for num, freq in cluster_counts.items():
writer.writerow([num, freq])
for metacat in META_CATEGORIES:
cluster_counts = defaultdict(int)
with open(metacat + "_view_frequencies.csv", 'r') as f:
reader = csv.reader(f)
next(reader)
for row in reader:
view_str = ""
for r in row[1:]:
if r == "0" or r == "1": # csv fields are strings
view_str += str(r)
if view_str == "":
continue
cluster_num = views_to_cnum[view_str]
cluster_counts[cluster_num] += int(row[0])
with open(metacat + '_cluster_counts.csv', 'w') as cc:
writer = csv.writer(cc)
writer.writerow(["cluster", "freq"])
for num, freq in cluster_counts.items():
writer.writerow([num, freq])
# 4. Group/user details
# a. Within each group/user, how many samples fall in each cluster?
# b. How many clusters does the group spread out over?
# Final questions:
# 1. What are the popular settings?
# 2. What does this mean beyond Foldit?
print_experiment_details()
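# Print raw value counts for every binary and categorical option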
def freq_all():
for o in BINARY_OPTIONS + list(CAT_OPTIONS.keys()):
try:
c.execute(FREQ_COUNT_QUERY % (o,o,o))
print(o.upper())
print(c.fetchall())
except Exception as e:
print("Invalid option: " + str(o))
def update_dictionary_apply_fn_options_freq(dictionary, fn):
for o in BINARY_OPTIONS:
try:
c.execute(FREQ_COUNT_QUERY % (o,o,o))
results = c.fetchall()
# note that it returns (None,0) as result 0, I haven't figured out how to silence that
count_0 = results[0][1]
count_1 = results[1][1]
dictionary[o] = fn(count_0, count_1)
except Exception as e:
print(e)
print("Invalid option: " + str(o))
# create dictionary for binarized cat options for each unique entry in options (uid + pid + time)
# {unique id : {cat_option_val : bool}}
binarized_cat_options_dict = query_to_views("", True)
cat_options_dict_per_unique_id = list(binarized_cat_options_dict.values()) # list() so it can be indexed below
num_unique_ids = len(binarized_cat_options_dict.keys())
all_cat_option_values = cat_options_dict_per_unique_id[0].keys()
for v in all_cat_option_values:
count_1 = sum([d[v] for d in cat_options_dict_per_unique_id])
count_0 = num_unique_ids - count_1
dictionary[v] = fn(count_0, count_1)
return dictionary
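# Identify experts (users with at least MIN_HIGHSCORES_PER_EXPERT high-scoring puzzles) and write them to experts.csv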
def get_all_experts():
# get all users
c.execute('''select distinct uid from rprp_puzzle_ranks''')
users = c.fetchall()
print("Identifying experts:")
expert_dict = {}
user_count = 0
for user in users:
user_count += 1
num_hs = count_expertise(user)
if num_hs >= MIN_HIGHSCORES_PER_EXPERT:
expert_dict[user[0]] = num_hs
print('!',end='')
if user_count % 5 == 0:
print('.',end='')
sys.stdout.flush()
sorted_experts = sorted(expert_dict.items(), key=operator.itemgetter(1))
with open('experts.csv', 'w') as expert_file:
writer = csv.writer(expert_file)
writer.writerows(sorted_experts)
def get_all_entropies(output=False):
if not is_db_clean:
raise Exception("Database must be clean to run get_all_entropies")
global ENTROPY_DICT
ENTROPY_DICT = defaultdict(float)
update_dictionary_apply_fn_options_freq(ENTROPY_DICT, entropy)
if output:
sorted_dict = sorted(ENTROPY_DICT.items(), key=operator.itemgetter(1), reverse=True)
for option, en in sorted_dict:
print(option + "," + str(en))
def get_all_freq_binarized_options(output=False):
if not is_db_clean:
raise Exception("Database must be clean to run get_all_freq_binarized_options")
global OPT_FREQ_DICT
OPT_FREQ_DICT = defaultdict(float)
update_dictionary_apply_fn_options_freq(OPT_FREQ_DICT, true_frequency)
if output:
sorted_dict = sorted(OPT_FREQ_DICT.items(), key=operator.itemgetter(1), reverse=True)
for option, freq in sorted_dict:
print(option + "," + str(freq))
# Returns a dictionary mapping each puzzle id to its maximum high score threshold (score required
# to be in top 5% of rankings (95th percentile), lower scores are better
def get_all_puzzle_highscores_dict():
c.execute('''select distinct pid from rprp_puzzle_ranks''')
results = c.fetchall()
puzzle_ids = [result[0] for result in results]
# dictionary that maps pid to score at the 95th percentile for the puzzle
pid_highscore = {}
for pid in puzzle_ids:
pid_highscore[pid] = get_highscore(pid)
return pid_highscore
# add best_score_is_hs and cur_score_is_hs cols to specified table
# impt note: must call this function on rprp_puzzle_ranks prior to calling on other tables
def add_is_highscore_cols(table):
# must get is_hs on ranks table before options table
if table != "rprp_puzzle_ranks":
c.execute('''PRAGMA table_info(rprp_puzzle_ranks)''')
results = c.fetchall()
rprp_puzzle_ranks_cols = [result[1] for result in results] # column names are already str; encoding to bytes would never match the names checked below
if "best_score_is_hs" not in rprp_puzzle_ranks_cols or "cur_score_is_hs" not in rprp_puzzle_ranks_cols:
raise Exception("Must call add_is_highscore_cols('rprp_puzzle_ranks') first")
# add best_score_is column to specified table
try:
c.execute('''ALTER TABLE %s ADD best_score_is_hs INT DEFAULT -1''' % table)
print('''INFO: Created best_score_is_hs column in %s. Calculating best_score_is_hs ...''' % table)
except Exception as e:
print('''INFO: best_score_is_hs column already exists in %s. Recalculating best_score_is_hs...''' % table)
# add cur_score_is_hs column to specified table
try:
c.execute('''ALTER TABLE %s ADD cur_score_is_hs INT DEFAULT -1 NOT NULL''' % table)
print('''INFO: Created cur_score_is_hs column in %s. Calculating cur_score_is_hs ...''' % table)
except Exception as e:
print('''INFO: cur_score_is_hs column already exists in %s. Recalculating cur_score_is_hs...''' % table)
# get dictionary {pid : high score} from rprp_puzzle_ranks table
pid_highscore_dict = get_all_puzzle_highscores_dict()
if table == "rprp_puzzle_ranks":
update_is_highscore_cols_for_table("rprp_puzzle_ranks", pid_highscore_dict)
elif table == "options":
update_is_highscore_cols_for_table("options", pid_highscore_dict)
else:
raise Exception("is high score columns not relevant to specified table: " + str(table))
# update best_score_is_hs and cur_score_is_hs columns in specified table using dictionary {pid : highscore}
def update_is_highscore_cols_for_table(table, pid_highscore_dict):
if table == "rprp_puzzle_ranks":
for pid in pid_highscore_dict.keys():
highscore = pid_highscore_dict[pid]
c.execute('''update rprp_puzzle_ranks set best_score_is_hs = 0 where pid == %d and best_score > %d'''
% (pid, highscore))
c.execute('''update rprp_puzzle_ranks set best_score_is_hs = 1 where pid == %d and best_score <= %d'''
% (pid, highscore))
c.execute('''update rprp_puzzle_ranks set cur_score_is_hs = 0 where pid == %d and cur_score > %d'''
% (pid, highscore))
c.execute('''update rprp_puzzle_ranks set cur_score_is_hs = 1 where pid == %d and cur_score <= %d'''
% (pid, highscore))
elif table == "options":
print("options.best_score_is_hs is DEPRECATED")
return
print("updating options best_score_is_hs")
c.execute('''select distinct uid, pid from options''')
entries = c.fetchall()
count = 0
for entry in entries:
uid = entry[0]
pid = entry[1]
try:
c.execute('''select best_score_is_hs from rprp_puzzle_ranks where uid == \"%s\" and pid == %d;''' % (uid, pid))
best_score_is_hs = c.fetchone()
if best_score_is_hs is None:
best_score_is_hs = -1
else:
best_score_is_hs = best_score_is_hs[0]
print(best_score_is_hs)
c.execute('''update options set best_score_is_hs = %d where uid == \"%s\" and pid == %d;''' % (best_score_is_hs, uid, pid))
count += 1
except Exception as e:
print(e)
if count % 10 == 0:
print('.',end='')
sys.stdout.flush()
return
else:
raise Exception("No is high score columns for specified table: " + str(table))
# save changes to database
conn.commit()
# Add is_expert col to specified table
def add_is_expert_col(table):
try:
c.execute('''ALTER TABLE %s ADD is_expert INT DEFAULT 0 NOT NULL''' % table)
print('''INFO: Created is_expert column in %s. Calculating is_expert ...''' % table)
except Exception as e:
print('''INFO: is_expert column already exists in %s. Recalculating is_expert...''' % table)
# Get list of experts
if not os.path.isfile(EXPERTS_FILE):
raise Exception("ERR: Experts file not found: " + EXPERTS_FILE)
experts_list = []
with open(EXPERTS_FILE, 'r') as experts_file:
reader = csv.reader(experts_file)
for row in reader:
experts_list.append(row[0])
c.execute('''update %s set is_expert = 1 where uid in %s''' % (table, str(tuple(experts_list))))
conn.commit()
# Add is_selected_novice col to options table where the number of novices = number of experts
def add_is_selected_novice_to_options(reselect=False):
# Check if is_expert col exists in options
c.execute('''PRAGMA table_info(options)''')
results = c.fetchall()
options_cols = [result[1] for result in results]
if "is_expert" not in options_cols:
print("INFO: Adding is_expert col to options table...")
add_is_expert_col("options")
# Get number of experts in options
c.execute('''select count(distinct uid) from options where is_expert = 1''')
results = c.fetchall()
num_experts = results[0][0]
print("INFO: There are ", num_experts, " in the options table")
# Add is_selected_novice col to options
try:
c.execute('''ALTER TABLE options ADD is_selected_novice INT DEFAULT 0 NOT NULL''')
print('''INFO: Created is_selected_novice column in options. Selecting novices...''')
except Exception as e:
if reselect:
print("INFO: is_selected_novice already exists in options. Re-selecting novices... ")
c.execute('''update options set is_selected_novice = 0 where is_selected_novice = 1''')
else:
print('''INFO: is_selected_novice already exists in options. Exiting selection...''')
return
uids = []
c.execute('''select distinct uid from options where is_expert = 0 and instr(puzzle_cat, \"Design\")''')
results = c.fetchall()
for result in results:
uids.append(result[0])
print("design: " + str(len(uids)))
c.execute('''select distinct uid from options where is_expert = 0 and instr(puzzle_cat, \"Prediction\")''')
results = c.fetchall()
puids = []
for result in results:
puids.append(result[0])
uids = [uid for uid in uids if uid in puids] # don't remove items from a list while iterating over it
print("design+pred: " + str(len(uids)))
c.execute('''select distinct uid from options where is_expert = 0 and instr(puzzle_cat, \"Electron Density\")''')
results = c.fetchall()
euids = []
for result in results:
euids.append(result[0])
uids = [uid for uid in uids if uid in euids]
print("design+pred+ed: " + str(len(uids)))
c.execute('''select distinct uid from options where is_expert = 0 and instr(puzzle_cat, \"Hand-Folding\")''')
results = c.fetchall()
huids = []
for result in results:
huids.append(result[0])
uids = [uid for uid in uids if uid in huids]
print("design+pred+ed+hf: " + str(len(uids)))
uids = uids[:num_experts]
c.execute('''update options set is_selected_novice = 1 where uid in %s''' % (str(tuple(uids))))
print("INFO: Selected ", len(uids), " novices randomly from the options table")
conn.commit()
# Add puzzle_cat col to options table
def add_puzzle_cat_col_to_options():
try:
c.execute('''ALTER TABLE options ADD puzzle_cat TEXT DEFAULT ""''')
print('''INFO: Created puzzle_cat column in options. Calculating puzzle_cat ...''')
except Exception as e:
print('''INFO: puzzle_cat column already exists in options. Recalculating puzzle_cat...''')
for cat in PIDS_BY_CAT.keys():
puzzle_ids = map(int, PIDS_BY_CAT[cat])
c.execute('''update options set puzzle_cat = puzzle_cat || '%s' where pid in %s'''
% (", " + str(cat), str(tuple(puzzle_ids))))
conn.commit()
# Add puzzle_cat col to rprp_puzzle_ranks table
def add_puzzle_cat_col_to_ranks():
try:
c.execute('''ALTER TABLE rprp_puzzle_ranks ADD puzzle_cat TEXT DEFAULT ""''')
print('''INFO: Created puzzle_cat column in rprp_puzzle_ranks. Calculating puzzle_cat ...''')
except Exception as e:
print('''INFO: puzzle_cat column already exists in rprp_puzzle_ranks. Recalculating puzzle_cat...''')
for cat in PIDS_BY_CAT.keys():
puzzle_ids = map(int, PIDS_BY_CAT[cat])
c.execute('''update rprp_puzzle_ranks set puzzle_cat = puzzle_cat || '%s' where pid in %s'''
% (", " + str(cat), str(tuple(puzzle_ids))))
conn.commit()
# ------------ WRITE TO CSV FUNCTIONS -----------------
# Input: a list of dictionaries of {column name : val} for each entry in the desired table
# to create, the name of the csv file to create
# Output: creates a csv file from the given dictionary data
def write_csv_from_dict(dict_data, name):
csv_file_name = name
csv_columns = dict_data[0].keys()
try:
with open(csv_file_name, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
writer.writeheader()
for data in dict_data:
writer.writerow(data)
except IOError:
raise Exception("I/O error")
# Input: where query for query_to_views
# Output: creates a csv file for the options data containing relevant columns for
# viewing/visualization and hierarchical clustering
def write_options_csv(where):
test_write_options_csv(where) # TEST
return
options_views_dicts = query_to_views(where)
options_csv_dict = {}
c.execute('''select uid, pid, time from options %s''' % where)
results = c.fetchall()
for result in results:
uid = result[0]
pid = result[1]
time = result[2]
unique_id = str(uid) + str(pid) + str(time)
options_csv_dict[unique_id] = {"uid": uid, "pid": pid, "time": time}
options_csv_dict[unique_id].update(options_views_dicts[unique_id])
dict_data = options_csv_dict.values()
write_csv_from_dict(dict_data, "options_view.csv")
print("Created options_view csv")
def test_write_options_csv(where):
#cat_opts = []
#for cat in CAT_KEYS:
# for o in CAT_OPTIONS[cat]
# cat_opts.append(o)
options_list = ','.join(BINARY_OPTIONS) # + cat_opts)
print("Options list test: " + options_list)
c.execute('''select %s from options''' % options_list)
results = c.fetchall()
with open('test_csv.csv', 'w') as testfile:
writer = csv.writer(testfile)
writer.writerow(BINARY_OPTIONS)
for r in results:
writer.writerow(r)
# ------------ CLEAN DATABASE -----------------
def remove_error_entries():
print("INFO: Removing entries with errors...")
c.execute("select pid from options where error == 1")
options_to_remove = [row[0] for row in c.fetchall()]
for pid in options_to_remove:
c.execute('''delete from options where pid == %d''' % pid)
num_removed = len(options_to_remove)
print("INFO: Removed " + str(num_removed) + " entries with errors from options")
return num_removed
def remove_invalid_puzzle_ranks():
print("INFO: Removing invalid puzzle rank entries...")
c.execute("select pid from rprp_puzzle_ranks where is_valid == 0")
ranks_to_remove = [row[0] for row in c.fetchall()]
for pid in ranks_to_remove:
c.execute('''delete from rprp_puzzle_ranks where pid == %d''' % pid)
num_removed = len(ranks_to_remove)
print("INFO: Removed " + str(num_removed) +
" entries from rprp_puzzle_ranks with invalid puzzle ranks")
def remove_beginner_puzzle_entries():
print("INFO: Removing Beginner entries...")
beginner_puzzles = list(map(int, PIDS_BY_CAT['Beginner'])) # list() so it can be sliced and len()'d below
beginner_puzzles_chunks = \
[beginner_puzzles[i:i + 100] for i in range(0, len(beginner_puzzles), 100)]
puzzles_to_remove = []
ranks_to_remove = []
options_to_remove = []
for chunk in beginner_puzzles_chunks:
str_chunk = str(tuple(chunk))
c.execute('''select nid from rpnode__puzzle where nid IN %s''' % str_chunk)
puzzles_to_remove += [row[0] for row in c.fetchall()]
c.execute('''select pid from rprp_puzzle_ranks where pid IN %s''' % str_chunk)
ranks_to_remove += [row[0] for row in c.fetchall()]
c.execute('''select pid from options where pid IN %s''' % str_chunk)
options_to_remove += [row[0] for row in c.fetchall()]
num_puzzles_to_remove = len(puzzles_to_remove)
num_ranks_to_remove = len(ranks_to_remove)
num_options_to_remove = len(options_to_remove)
puzzles_to_remove_chunks = \
[puzzles_to_remove[i:i + 1000] for i in range(0, len(puzzles_to_remove), 1000)]
ranks_to_remove_chunks = \
[ranks_to_remove[i:i + 1000] for i in range(0, len(ranks_to_remove), 1000)]
options_to_remove_chunks = \
[options_to_remove[i:i + 1000] for i in range(0, len(options_to_remove), 1000)]
print("INFO: Removing Beginner entries from rpnode__puzzle...")
for chunk in puzzles_to_remove_chunks:
c.execute('''delete from rpnode__puzzle where nid IN %s''' % str(tuple(chunk)))
print("INFO: Removed " + str(num_puzzles_to_remove) + " entries from rpnode__puzzle for beginner puzzles")
print("INFO: Removing Beginner entries from rprp_puzzle_ranks...")
for chunk in ranks_to_remove_chunks:
c.execute('''delete from rprp_puzzle_ranks where pid IN %s''' % str(tuple(chunk)))
print("INFO: Removed " + str(num_ranks_to_remove) + " entries from rprp_puzzle_ranks for beginner puzzles")
print("INFO: Removing Beginner entries from options...")
for chunk in options_to_remove_chunks:
c.execute('''delete from options where pid IN %s''' % str(tuple(chunk)))
print("INFO: Removed " + str(num_options_to_remove) + " entries from options for beginner puzzles")
return num_options_to_remove
def remove_intro_puzzle_entries():
print("INFO: Removing Intro entries...")
c.execute("select pid from rprp_puzzle_ranks where pid not in (select nid from rpnode__puzzle)")
ranks_to_remove = [row[0] for row in c.fetchall()]
c.execute("select pid from options where pid not in (select nid from rpnode__puzzle)")
options_to_remove = [row[0] for row in c.fetchall()]
num_ranks_to_remove = len(ranks_to_remove)
num_options_to_remove = len(options_to_remove)
ranks_to_remove_chunks = [ranks_to_remove[i:i + 100] for i in range(0, len(ranks_to_remove), 100)]
options_to_remove_chunks = \
[options_to_remove[i:i + 1000] for i in range(0, len(options_to_remove), 1000)]
print("INFO: Removing Intro entries from rprp_puzzle_ranks...")
for chunk in ranks_to_remove_chunks:
c.execute('''delete from rprp_puzzle_ranks where pid IN %s''' % str(tuple(chunk)))
print("INFO: Removed " + str(num_ranks_to_remove) + " entries from rprp_puzzle_ranks for intro puzzles")
print("INFO: Removing Intro entries from options...")
for chunk in options_to_remove_chunks:
c.execute('''delete from options where pid IN %s''' % str(tuple(chunk)))
print("INFO: Removed " + str(num_options_to_remove) + " entries from options for intro puzzles")
return num_options_to_remove
def remove_major_missing_entries():
print("INFO: Removing entries with major missing data...")
all_options = BINARY_OPTIONS + CAT_KEYS
sep = ","
query_cols = sep.join(["uid", "pid", "time"] + all_options)
missing_dict = {"total_entry_count": 0}
for option in all_options:
if option not in MISSING_DEFAULTS.keys():
missing_dict[option] = 0
c.execute('''select %s from options''' % query_cols)
results = c.fetchall()
for entry_idx in range(len(results)):
num_major_options_missing = 0
uid, pid, time = results[entry_idx][0:3]
# for all options in the entry
for o_index in range(len(results[entry_idx]) - 3):
o_name = all_options[o_index]
# if we expect to have data for this option but do not
if o_name not in MISSING_DEFAULTS.keys():
# increment counts for missing option in missing_dict
if results[entry_idx][3 + o_index] is None:
num_major_options_missing += 1
missing_dict[o_name] += 1
# remove entry from options table if it has any major options missing
if num_major_options_missing > 0:
missing_dict["total_entry_count"] += 1
c.execute('''delete from options where uid = \"%s\" and pid == %d and time == %d''' % (uid, pid, time))
return missing_dict
def replace_minor_missing_entries():
print("INFO: Replacing minor missing data entries with default values...")
for option in MISSING_DEFAULTS.keys():
# count / sanity check
c.execute('''select %s from options where %s is NULL ''' % (option, option))
results = c.fetchall()
print("Replaced " + str(len(results)) + " missing entries for " + str(option) + " with default value")
c.execute('''update options set %s = %d where %s is NULL ''' % (option, MISSING_DEFAULTS[option], option))
def clean_db():
print("INFO: Cleaning database (this may take a while)...")
entries_removed = 0
entries_removed += remove_error_entries()
remove_invalid_puzzle_ranks() # doesn't remove any options entries
entries_removed += remove_beginner_puzzle_entries()
entries_removed += remove_intro_puzzle_entries()
missing_dict = remove_major_missing_entries()
entries_removed += missing_dict["total_entry_count"]
print("INFO: Removed " + str(entries_removed) + " bad entries from options table.")
print("INFO: Removed " + str(missing_dict["total_entry_count"]) + " entries with missing options data.")
for option in missing_dict.keys():
if option == "total_entry_count":
continue
print("INFO: Found " + str(missing_dict[option]) + " entries with missing " + str(option))
replace_minor_missing_entries()
conn.commit()
print("INFO: Databased cleaned.")
# ------------ END CLEAN DATABASE -----------------
def import_categories():
global PIDS_BY_CAT
with open('puzzle_categories.csv', 'r') as cat_file:
reader = csv.reader(cat_file)
for row in reader:
pid = row[0]
for cat in row[1:]:
if cat == "NULL":
continue
if cat not in PIDS_BY_CAT.keys():
PIDS_BY_CAT[cat] = []
PIDS_BY_CAT[cat].append(pid)
print("Imported " + str(len(PIDS_BY_CAT)) + " puzzle categories.")
drop_cats = []
for cat in PIDS_BY_CAT.keys():
num_puz = len(PIDS_BY_CAT[cat])
if args.debug:
print(" " + cat + ": " + str(num_puz) + " puzzles")
if num_puz < 10:
if args.debug:
print(" INFO: Dropping " + cat + " (too few puzzles)")
drop_cats.append(cat)
for cat in drop_cats:
PIDS_BY_CAT.pop(cat, None)
def import_experts(recalculate=False):
global EXPERTS
EXPERTS = []
if recalculate:
get_all_experts()
if EXPERTS == []:
with open('experts.csv', 'r') as exp_file:
reader = csv.reader(exp_file)
for row in reader:
EXPERTS.append(row[0])
print("Imported " + str(len(EXPERTS)) + " experts.")
# -------- END ONE TIME FUNCTIONS -----------------
# -------- VIEW-BASED CALCULATIONS ----------------
# Input: string of where queries for options table (e.g. "where uid=... and pid=....")
# For each result, create a view dict of bools representing each option, sorted by key
# Output: dict of views (dict of dicts, keys are unique ids = gid/uid + pid + time (concatted))
def query_to_views(where, cat_only=False):
if args.debug:
print("\nDEBUG: query_to_views " + str(where))
views = {} # dict of dicts, uniquely identified by uid, pid, and time
query = '''select r.gid, o.uid, o.pid, o.time, %s, %s from options o
join (select gid, best_score_is_hs, uid, pid from rprp_puzzle_ranks) r
on o.uid == r.uid and o.pid == r.pid %s''' \
% (','.join(opt for opt in BINARY_OPTIONS), ','.join(opt for opt in CAT_OPTIONS), where)
c.execute(query)
results = c.fetchall()
num_bin_options = len(BINARY_OPTIONS)
num_cat_options = len(CAT_OPTIONS)
for result in results:
gid = 0 if result[0] is None else result[0]
unique_id = str(gid) + "/" + str(result[1]) + str(result[2]) + str(result[3])
if unique_id not in views:
views[unique_id] = {}
else:
pass
#print("WARN: duplicate")
#print(unique_id)
view = views[unique_id]
if not cat_only:
for i in range(num_bin_options):
view[BINARY_OPTIONS[i]] = result[4 + i]
for j in range(num_cat_options):
cat_option_name = list(CAT_OPTIONS.keys())[j]
cat_option_values = CAT_OPTIONS[cat_option_name]
result_value = result[4 + num_bin_options + j]
for option in cat_option_values:
view[option] = 1 if option == result_value else 0
views[unique_id] = view
if args.debug:
print(" query_to_views: " + str(len(views.keys())) + " results\n")
return views.copy()
def list_clean(list):
for i in range(len(list)):
if list[i] == u'0':
list[i] = 0
elif list[i] == u'1':
list[i] = 1
return list
# convert unicode to ints, the hardcoded way
def unicode_clean(cluster):
for i in range(len(cluster)):
for j in range(len(cluster[i])):
if cluster[i][j] == u'0':
cluster[i][j] = 0
elif cluster[i][j] == u'1':
cluster[i][j] = 1
return cluster
# Input: view dict from query_to_views
# Output: list of just the values in a sorted order to keep things consistent
def view_dict_to_list(view):
list = []
for bin_opt in BINARY_OPTIONS:
try:
list.append(view[bin_opt])
except Exception as e:
print("view_dict_to_list error")
print("The view was:")
print(view)
if view.startswith("U"):
print("Did you accidentally give it the uid instead of the value of views[uid]?")
for cat_opt in CAT_KEYS:
for opt in CAT_OPTIONS[cat_opt]:
try:
list.append(view[opt])
except Exception as e:
print("view_dict_to_list error")
print("The view was:")
print(view)
return list
# doesn't work if some of the dimensions were deleted during analysis
# The reverse of view_dict_to_list
# Input: list of just the view option values in a sorted order to keep things consistent
# Output: view dict as "option name": option value
def list_to_view_dict(list):
view = {}
for bin_opt in BINARY_OPTIONS:
view[bin_opt] = list.pop(0)
for cat_opt in CAT_KEYS:
for opt in CAT_OPTIONS[cat_opt]:
view[opt] = list.pop(0)
return view
# Returns true iff score is <= 5% of best scores for this puzzle
def is_highscore(pid, score):
# get the score of the entry that's exactly 95th percentile, check our score vs that
min_score = get_highscore(pid)
return score <= min_score
# Returns score threshold for pid to be in top 5% of best scores for the puzzle, lower scores are better
def get_highscore(pid):
c.execute(
'''select distinct uid, best_score from rprp_puzzle_ranks where pid=%d group by uid order by best_score asc;''' % pid)
# count entries, get the entry that's exactly 95th percentile
results = c.fetchall()
num_scores = len(results)
index = min(int(math.ceil(num_scores * 0.05)) - 1, len(results) - 1) # prevent index out of range error
min_score = results[index][1]
return min_score
# Returns number of high scores in puzzles
def count_expertise(uid):
# get list of their best scores for each puzzle
# count is_highscore
has_hs_column = True
try:
c.execute('''select pid, best_score, best_score_is_hs from rprp_puzzle_ranks where uid=\"%s\" group by pid order by best_score asc;''' % uid)
except:
print("WARN: no is_highscore column available")
has_hs_column = False
c.execute('''select pid, best_score from rprp_puzzle_ranks where uid=\"%s\" group by pid order by best_score asc;''' % uid)
results = c.fetchall()
num_highscores = 0
for result in results:
if has_hs_column:
if result[2] == 1:
num_highscores += 1
elif is_highscore(result[0], result[1]):
num_highscores += 1
return num_highscores
# calculates the similarity between two views - Euclidean distance
# assumes views are a vector of interval variables
def distance(view1, view2):
dist = [(a - b)**2 for a, b in zip(view1, view2)]
return math.sqrt(sum(dist)) # apparently this method is faster than external lib methods
def generate_frequencies_file():
freq_bin_rows = []
freq_cat_rows = []
for o in BINARY_OPTIONS:
try:
c.execute(FREQ_COUNT_QUERY % (o,o,o))
results = [[o] + list(r) for r in c.fetchall()]
freq_bin_rows += results
except Exception as e:
print("Invalid option: " + str(o))
for o in CAT_OPTIONS.keys():
try:
c.execute(FREQ_COUNT_QUERY % (o, o, o))
results = c.fetchall()
total = sum([x[1] for x in results])
for result in results:
freq_cat_rows += [[result[0], "0", total - result[1]], [result[0], "1", result[1]]]
except Exception as e:
print("Invalid option: " + str(o))
with open(FREQUENCIES_FILE, 'w') as cc:
writer = csv.writer(cc)
writer.writerow(["option", "value", "freq"])
writer.writerows(freq_bin_rows)
writer.writerows(freq_cat_rows)
# Input: a View dict
# Output: the View Dict, elementwise multiplied by (1-frequency)
def apply_inverse_frequency_weighting(view):
# Generate the frequencies file (uncomment if need be)
#generate_frequencies_file()
if not os.path.isfile(FREQUENCIES_FILE):
raise Exception("ERR: Frequency file not found: " + FREQUENCIES_FILE)
freq_dict = {}
# Read in the frequencies file
with open(FREQUENCIES_FILE, 'r') as frequencies_file:
reader = csv.reader(frequencies_file)
next(reader, None) # skip header row
for row in reader:
freq_dict[row[0] + row[1]] = int(row[2])
for opt in view.keys():
try:
option_val = int(view[opt])
zero = freq_dict[opt + "0"]
one = freq_dict[opt + "1"]
if option_val == 0:
weight = zero / (zero + one) if zero > 0 else 0 # avoid div by 0 error
else:
weight = one / (zero + one) if one > 0 else 0
view[opt] = 1.0 - weight
except KeyError as e:
if opt is not "Hydrophobic": # hard code :( but Hydrophobic seems to be removed from game
print("WARN: No frequency found in " + FREQUENCIES_FILE + " for option: " + opt)
return view
# calculates the density of a cluster - i.e., the mean similarity between every view and every other view
# returns mean and std
# if dims option is set, calculates density only for specific dimension(s)
# O(n^2) algorithm
#def density(cluster, dims=[-1]):
# distances = []
# for i in range(len(cluster)):
# for j in range(len(cluster)):
# if i != j:
# if dims == [-1]:
# distances.append(distance(cluster[i], cluster[j]))
# else:
# d_i = []
# d_j = []
# for d in dims:
# if d > len(cluster[0])-1 or d < 0:
# raise IndexError("Tried to calculate density of a cluster on an invalid dimension")
# else:
# d_i.append(cluster[i][d])
# d_j.append(cluster[j][d])
# distances.append(distance(d_i, d_j))
# mean = numpy.mean(distances)
# std = numpy.std(distances)
# return mean,std
# Return an array of standard deviations for each dimension in the cluster
def density(cluster, dims=[-1]):
if cluster == []: # handle empty set
return []
stds = []
if dims == [-1]:
dims = range(len(cluster[0]))
for i in dims:
stds.append(numpy.std([view[i] for view in cluster]))
return stds
# maybe there should be some way to specify dims by human-readable option (for this and density function)
# returns the centroid of a cluster
# if dims option is set, calculates for only specific dimension(s)
def centroid(clus, dims=[-1]):
if clus == []: # handle empty set
return []
cluster = clus
if dims != [-1]:
cluster = numpy.delete(cluster, dims, axis=1)
return numpy.mean(cluster, axis=0).tolist()
# returns the entropy for a binary var
def entropy(count_0, count_1):
p = count_1 / (count_0 + count_1)
# math.log(0,2) will raise a value error, taken to be 0.0 instead
if p == 0.0 or p == 1.0:
return 0.0
return -(p * math.log(p,2)) - (1 - p) * math.log(1-p,2)
# returns frequency of true for a binary var
def true_frequency(count_0, count_1):
return (count_1 * 1.0) / (count_0 + count_1)
# -------- END VIEW-BASED CALCULATIONS -------------
# ----------------- MAIN ---------------------------
def io_mode(args):
single_query = args.execute != '' or args.quick != ''
command = ''
if args.execute:
command = "e " + args.execute
if args.quick:
command = args.quick
while (command != 'q' and command != 'exit'):
command = command.lower()
if command == 'h':
print("h - help")
print("q - quit")
print("t - list tables") # options, rpnode__puzzle, sqlite_sequence, rprp_puzzle_ranks
print("c [table] - list columns in table")
print("e [command] - execute command")
print("freq [option] - count values of an option (or 'all')")
print("ent [option] - get entropy of option (or 'all')")
print("clean - clean the database of bad entries")
print("process - add new data to database, e.g. highscore info, is expert info")
print("stats - print experiment details")
print("main - run all main stats tests (will take a while)")
print("csv options - write options table to csv")
if command == 't':
c.execute('''SELECT name from sqlite_master where type = 'table'; ''')
for t in c.fetchall():
print(t[0])
if command.startswith("c "):
table = command[2:]
try:
c.execute('''SELECT * from %s;''' % table)
for info in c.description:
print(info[0])
except:
print("Invalid table name: " + str(table))
if command == "freq all":
freq_all()
elif command == "binarized freq all":
get_all_freq_binarized_options(output=True)
elif command.startswith("freq "):
option = command[5:]
try:
c.execute(FREQ_COUNT_QUERY % (option,option,option))
print(c.fetchall())
except Exception as e:
print("Invalid option: " + str(option))
if command == "clean":
clean_db()
if command == "main":
main_stats()
if command == "csv options":
write_options_csv("")
if command == "ent all":
get_all_entropies(output=True)
elif command.startswith("ent "):
if not is_db_clean:
raise Exception("Database must be clean to get entropies")
option = command[4:]
try:
c.execute(FREQ_COUNT_QUERY % (option,option,option))
results = c.fetchall()
# note that it returns (None,0) as result 0, I don't know why
count_0 = results[0][1]
count_1 = results[1][1]
print(entropy(count_0, count_1))
except Exception as e:
print("Invalid option: " + str(option))
if command.startswith("e "):
com = command[2:]
if not com.endswith(";"): # be nice to user, append ; if need be
com = com + ";"
try:
c.execute(com)
print(c.fetchall())
except sqlite3.OperationalError as e:
print("ERR: unable to perform operation")
print("INFO: " + str(e))
if command == "process":
print("INFO: Processing data:")
print("INFO: adding puzzle category labels")
add_puzzle_cat_col_to_ranks()
add_puzzle_cat_col_to_options()
print("INFO: Updating high scores...")
add_is_highscore_cols("rprp_puzzle_ranks")
print("INFO: Finding experts...")
import_experts(recalculate=True)
add_is_expert_col("rprp_puzzle_ranks")
add_is_expert_col("options")
add_is_selected_novice_to_options(False)
if command == "stats":
print_experiment_details()
if not single_query:
print("Enter command (h for help): ")
command = input("> ")
else:
command = 'q'
if not single_query:
print("Goodbye")
if __name__ == "__main__":
import argparse
prog_desc = "Foldit view options analysis."
parser = argparse.ArgumentParser(description=prog_desc)
parser.add_argument('-debug', action='store_true', help="Print debug info.")
parser.add_argument('--test', action='store_true', help="Run test suite instead of I/O operations.")
parser.add_argument('--quick', default="", help="Quick I/O command, e.g. 't' to list tables.")
parser.add_argument('--execute', default="", help="Run a single SQL query.")
args = parser.parse_args()
print("Loading modules and data...")
import math, operator, csv, sys, re, numpy, sqlite3, datetime, os.path
import cProfile, pstats, glob
from scipy import stats
import scipy.cluster.hierarchy as shc
from skbio.diversity.alpha import shannon
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
global conn, is_db_clean
is_db_clean = False
if os.path.isfile('foldit_clean.db'):
conn = sqlite3.connect('foldit_clean.db')
is_db_clean = True
print("INFO: Found clean database: foldit_clean.db")
elif os.path.isfile('folditx.db'):
conn = sqlite3.connect('folditx.db')
print("WARN: Database is not clean. Use the --quick clean command and save database as foldit_clean.db")
else:
print("ERR: No database found with name folditx.db or foldit_clean.db")
exit(1)
global c
c = conn.cursor()
import_categories()
try:
import_experts(recalculate=False)
except IOError as e:
import_experts(recalculate=True)
print("...Loaded.")
if args.debug:
print("DEBUG mode on")
# TEST
# import StringIO
# pr = cProfile.Profile()
# pr.enable()
# s = StringIO.StringIO()
# test(args)
# pr.disable()
# sortby = 'cumulative'
# ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
# ps.print_stats()
# print(s.getvalue())
# exit(1)
if args.test:
test(args)
else:
io_mode(args)
``` |
{
"source": "joshmiller17/icr",
"score": 3
} |
#### File: joshmiller17/icr/analysis.py
```python
import pandas as pd
import numpy as np
from functools import reduce
## Constants
ANSWER_COLUMNS = ['Codes Favorite', 'Codes Least Favorite', 'Codes Updates', 'Codes Tutorial Favorite', 'Codes Tutorial Least Favorite']
def ADD(a, b): return a + b
## Reading
def clean_df(df_withna_with_invalid):
df_with_invalid = df_withna_with_invalid.fillna("")
df = df_with_invalid[df_with_invalid['Valid'] == 'Yes'].copy() # copy so the per-column edits below modify a real frame, not a view
def clean_series(series):
def clean_item(item):
split_item = item.replace(' ', '')\
.replace('/', '')\
.upper()\
.split(',')
return list(filter(None, split_item))
return series.map(clean_item)
for ans_col in ANSWER_COLUMNS:
df[ans_col] = clean_series(df[ans_col])
return df
def clean_and_read(filename): return clean_df(pd.read_csv(filename, sep='\t'))
BASENAME = "data/"
filenames = ["C1-v4.tsv", "C2-v4.tsv", "C3-v4.tsv"]
full_filenames = map(lambda filename: f'{BASENAME}{filename}', filenames)
coder_dfs = list(map(clean_and_read, full_filenames))
## Combining
def combine_dfs(*dfs):
new_df = pd.DataFrame()
new_df['Game'] = dfs[0]['Game']
for ans_col in ANSWER_COLUMNS:
new_df[ans_col] = reduce(ADD, [df[ans_col] for df in dfs])
return new_df
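# For illustration: the per-answer cells are lists, so reducing with ADD concatenates the
# codes from all coders row by row, e.g. ['A', 'B'] + ['B'] -> ['A', 'B', 'B'].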
combined_df = combine_dfs(*coder_dfs)
## Analysis
def dump_df_info(df_name, df, as_csv):
def dump_series_info(series_name, series):
reduced_list = list(filter(lambda code: code != "NA", reduce(ADD, series)))
num_codes = len(reduced_list)
values = pd.Series(reduced_list).value_counts()
value_df = pd.DataFrame(values).rename({0: "Counts"}, axis=1)
value_df["Percents"] = value_df["Counts"] / num_codes * 100
print(f'{series_name}\n')
print(f'{value_df.to_csv() if as_csv else value_df}\n')
print(f'num codes: {num_codes}\n')
print(f'== INFORMATION FOR {df_name.upper()} == \n')
for ans_col in ANSWER_COLUMNS:
dump_series_info(ans_col, df[ans_col])
dump_series_info("All columns for this dataframe combined",
pd.concat([df[ans_col] for ans_col in ANSWER_COLUMNS]))
grouped_by_game = combined_df.groupby("Game")
## Output functions
def dump_all():
dump_df_info("all data", combined_df, False)
dump_df_info("all data", combined_df, True)
def dump_by_game():
for name, df in grouped_by_game:
if name != '':
dump_df_info(name, df, False)
for name, df in grouped_by_game:
if name != '':
dump_df_info(name, df, True)
def dump_combined_except(*games_to_exclude):
filtered_df = combined_df[combined_df['Game'].map(lambda game: game not in games_to_exclude)]
dump_df_info(f"All data combined, except for games {games_to_exclude}", filtered_df, True)
dump_df_info(f"All data combined, except for games {games_to_exclude}", filtered_df, False)
# @JOSH here you are
dump_combined_except("Foldit")
dump_combined_except("Foldit", "EteRNA", "EyeWire")
# @JOSH if you want these as well
# dump_all()
# dump_by_game()
print(f"Games List: {grouped_by_game.groups.keys()}")
``` |
{
"source": "joshmiller17/pl_parser",
"score": 3
} |
#### File: joshmiller17/pl_parser/jm_parser24.py
```python
"""
NOTES
- Error messages differentiate between syntax errors (the fault of the program being parsed) and parser errors (my fault; used as assertions)
"""
import traceback
import copy
SPACE = 0x20
LF = 0xa
CR = 0xd
INDENTATION = " "
ARROW = "->"
PRIMTYPES = ["bool", "char", "string", "int", "float"]
WHITESPACE = [ SPACE, LF, CR ]
COMMENT = "//" # followed by LF or CR
ASSIGNOPS = ["="]
OROPS = ["||"]
ANDOPS = ["&&"]
RELOPS = ["==", "!=", "<", "<=", ">", ">="]
ADDOPS = ["+", "-"]
MULOPS = ["*", "/"]
UNOPS = ["!", "-"]
OPS = OROPS + ANDOPS + RELOPS + ADDOPS + MULOPS + UNOPS
PRINTABLES = [" ", "!", "#", "$", "%", "&", "(", ")", "*", \
"+", ",", "-", ".", "/", ":", ";", "<", "=", ">", "?", "@", "[", "]", "^", "{", "}", "~"]
STM_REDIRECTS = {";" : "<stm-empty>", "if" : "<stm-if>", \
"while" : "<stm-while>", "for" : "<stm-for>", \
"return" : "<stm-ret>", "{" : "<block>", \
"halt" : "<stm-halt>"}
RESERVED = ["if", "while", "for", "return", "halt", "bool", "char", "string", "int", "float"]
illegal = False
error_line = 0
error_msg = ""
parsing_string = False
expecting = ["<protodecs>", "<classdecs>", "<stm>", "<end>"] #initial syntax expectations
protocols = []
classes = []
stms = []
line_count = 0
typeids = []
exp_grammar_stack = []
EXP_GRAMMAR = ['(',')','[',']']
current_obj = None
current_obj_type = None
object_stack = [None]
object_type_stack = ["None"]
current_token = ""
temp_token = ""
DEBUG_LEVEL = 2.5 # amount of debug output, range [0,3] in steps of 0.5 because debugging is messy
############################
######## CLASS DEFS ########
############################
class Protocol:
def __init__(self):
self.typeid = None
self.extends = []
self.typevars = []
self.funprotos = []
self.expecting_more_vars = False
self.open_tvars = False
def set_typeid(self, i):
self.typeid = i
def add_typevar(self, v):
self.typevars.append(v)
def set_expecting(self, b):
self.expecting_more_vars = b
# must be a typeapp
def add_extends(self, t):
if t.__class__.__name__ == "str":
ta = Typeapp()
ta.typeid = t
t2 = ta
else:
t2 = t
if t2.__class__.__name__ == "Typeapp":
self.extends.append(t2)
else:
throw_error("Parser error, expected <typeapp>")
def add_funproto(self, f):
self.funprotos.append(f)
def __str__(self):
return recursive_ast_to_string(self, "", 0)
class Funproto:
def __init__(self):
self.id = None
self.rtype = None
self.typevars = []
self.formals = []
self.expecting_more_vars = False
self.open_tvars = False
def set_id(self, i):
self.id = i
def add_typevar(self, v):
self.typevars.append(v)
def add_formal(self, f):
self.formals.append(f)
def set_expecting(self, b):
self.expecting_more_vars = b
def set_rtype(self, r):
self.rtype = r
def __str__(self):
return recursive_ast_to_string(self, "", 0)
class Fundec:
def __init__(self):
self.id = None
self.rtype = None
self.typevars = []
self.formals = []
self.block = None
self.expecting_more_vars = False
self.open_tvars = False
def set_id(self, i):
self.id = i
def add_typevar(self, v):
self.typevars.append(v)
def add_block(self, b):
self.block = b
def add_formal(self, f):
self.formals.append(f)
def set_expecting(self, b):
self.expecting_more_vars = b
def set_rtype(self, r):
self.rtype = r
def __str__(self):
return recursive_ast_to_string(self, "", 0)
class Formal:
def __init__(self):
self.type = None
self.id = None
def set_type(self, t):
self.type = t
def set_id(self, i):
self.id = i
def __str__(self):
return recursive_ast_to_string(self, "", 0)
class Class:
def __init__(self):
self.id = None
self.implements = []
self.typevars = []
self.funprotos = []
self.init_formals = []
self.block = None
self.expecting_more_vars = False
self.open_tvars = False
self.expecting_formals = True
self.bodydecs = []
def set_id(self, i):
self.id = i
# must be a typeapp
def add_implements(self, t):
if t.__class__.__name__ == "str":
ta = Typeapp()
ta.typeid = t
t2 = ta
else:
t2 = t
if t2.__class__.__name__ == "Typeapp":
self.implements.append(t2)
else:
throw_error("Parser error, expected <typeapp>")
def add_typevar(self, v):
self.typevars.append(v)
def set_expecting(self, b):
self.expecting_more_vars = b
def found_formals(self):
self.expecting_formals = False
def add_formal(self, f):
self.init_formals.append(f)
def add_block(self, b):
self.block = b
def add_dec(self, b):
self.bodydecs.append(b)
def __str__(self):
return recursive_ast_to_string(self, "", 0)
class Block:
def __init__(self):
self.local_decs = []
self.stms = []
self.dec_phase = True
def add_dec(self, l):
self.local_decs.append(l)
def end_decs(self):
self.dec_phase = False
def add_stm(self, s):
self.stms.append(s)
def __str__(self):
return recursive_ast_to_string(self, "", 0)
class Stm:
def __init__(self):
self.style = None # {"empty", "exp", "if", "while", "return0", "return", "block", "halt", "for"}
self.exps = []
self.stms = []
self.independent = False
self.block = None
self.vardec = None
def set_style(self, s):
self.style = s
def add_block(self, b):
self.block = b
def add_dec(self, d):
self.vardec = d
def add_exp(self, e):
self.exps.append(e)
def add_stm(self, s):
self.stms.append(s)
def __str__(self):
return recursive_ast_to_string(self, "", 0)
class Typeapp:
def __init__(self):
self.types = None
self.tvar = None
self.typeid = None
def add_tvar(self, t):
self.tvar = t
def add_typeid(self, t):
self.typeid = t
def set_types(self, t):
self.types = t
class Types:
def __init__(self):
self.types = []
def __repr__(self):
s = ""
for t in self.types:
s += t + ","
return s[:-1]
def __str__(self):
s = ""
for t in self.types:
s += t + ","
return s[:-1]
class Dec:
def __init__(self):
self.type = None
self.id = None
self.lit = None
self.eq = False
self.dectype = None
def set_type(self, t):
self.type = t
def set_id(self, i):
self.id = i
def consume_eq(self):
self.eq = True
def set_lit(self, l):
self.lit = l
def __str__(self):
return recursive_ast_to_string(self, "", 0)
class Constdec(Dec):
def __init__(self):
Dec.__init__(self)
self.dectype = "const"
class Vardec(Dec):
def __init__(self):
Dec.__init__(self)
self.dectype = "var"
self.exp = None
def add_exp(self, e):
self.exp = e
def __str__(self):
return recursive_ast_to_string(self, "", 0)
class Globaldec(Dec):
def __init__(self):
Dec.__init__(self)
self.dectype = "global"
class Fielddec:
def __init__(self):
self.type = None
self.id = None
def set_type(self, t):
self.type = t
def set_id(self, i):
self.id = i
def __str__(self):
return recursive_ast_to_string(self, "", 0)
# general approach for handling <exp> is as follows:
# collect all tokens as a raw array of strings until we know grammatically the <exp> is done
# then make several passes through the <exp> to determine <factor>, <term>, <simple>, <conjunct>, etc.
class Exp:
# EXPRESSIONS
# factor, factor-rest, etc
# simple is (term (addop simple)*) -- any terms joined by addops
# term: a term is a (factor (mulop factor)*) -- any factors joined by mulops
# disjunct is (conjunct (andop disjunct)*) -- any conjuncts joined by andop
# conjunct is (simple (relop simple)*) -- any simples joined by relops
# lhs is (disjunct (orop lhs)*) -- any disjuncts joined by orops
# exp is lhs (assignop exp)*
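# For illustration (grammar only): in "a + b * c", the "b * c" part is a single <term>
# (factors joined by a mulop) and the whole expression is a single <simple>
# (terms joined by an addop); the passes in compile() below mirror this precedence order.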
def __init__(self):
self.raw = [] # tokens known to be in <exp>
self.grammar_stack = [] # for counting matching () and []
self.index = 0 # used as a pointer to where in the raw array we're processing
self.current_factor = None
self.left = None
self.op = None
self.right = None
def raw_append(self, r):
self.raw.append(r)
def has_op(self, ops):
for m in ops:
if m in self.raw:
return True
return False
def new_factor(self):
if self.current_factor == None:
self.current_factor = Factor()
# iterate through raw string turning tokens into factors
def make_factors(self):
self.index = 0
while self.index < len(self.raw):
if self.raw[self.index] in OPS:
# op, ignore
self.index += 1
else:
self.make_factor()
try:
if "Factor" == self.raw[self.index].type:
self.index += 1
except Exception as e:
if illegal:
return
print("DEBUG: compiled this <factor>: " + str(self))
# handler for how to compose factor based on first token
def make_factor(self):
if DEBUG_LEVEL > 1.5:
print("DEBUG: factor index = " + str(self.index))
print("DEBUG: raw = ")
for f in self.raw:
print(str(f))
if DEBUG_LEVEL > 2.5:
if illegal:
return
else:
print("DEBUG: ------- waiting for ready (press enter to continue)")
raw_input()
# check for factor-unop
token = self.raw[self.index]
if DEBUG_LEVEL > 2.5:
print("DEBUG: parsing factor token " + str(token))
for u in UNOPS:
if u in token:
if u is token:
self.new_factor()
self.handle_factor_unop()
else: # tight unop
new_tokens = read_tight_code(self.raw.pop(self.index),internal=True)
for n in reversed(new_tokens): # insert in reverse so the split tokens keep their original order
self.raw.insert(self.index, n)
return
# no unop, continue
if token == "new":
self.raw.pop(self.index) # remove new
self.new_factor()
self.handle_factor_new()
elif token == "lambda":
self.raw.pop(self.index) # remove lambda
self.new_factor()
self.handle_factor_lam()
elif '.' in token:
if is_floatliteral(token):
self.new_factor()
self.handle_factor_lit()
else:
new_tokens = read_tight_code(self.raw.pop(self.index), internal=True, dot=True)
for n in reversed(new_tokens):
self.raw.insert(self.index, n)
return
elif '(' in token:
if '(' is token:
self.raw.pop(self.index) # remove (
self.new_factor()
self.handle_factor_exp()
else:
new_tokens = read_tight_code(self.raw.pop(self.index),internal=True)
for n in reversed(new_tokens):
self.raw.insert(self.index, n)
return
elif is_literal(token):
self.new_factor()
self.handle_factor_lit()
elif is_id(token):
self.new_factor()
self.handle_factor_id()
else:
throw_error("Syntax error while parsing <factor>", addl="Token <" + token + "> confused the parser")
def handle_factor_unop(self):
if DEBUG_LEVEL > 1.5:
print("DEBUG: handling factor unop")
print("DEBUG: factor index = " + str(self.index))
print("DEBUG: raw = ")
for f in self.raw:
print(str(f))
if not self.current_factor:
throw_error("Assertion error: current <factor> is None while parsing <unop>")
if self.raw[self.index] not in UNOPS:
throw_error("Expected <unop> while parsing <factor>")
self.current_factor.set_unop(self.raw.pop(self.index))
temp_factor = self.current_factor
self.current_factor = Factor() # fresh subfactor to hold the operand of the unop
temp_factor.set_subfactor(self.current_factor)
temp_factor.set_valid(True)
self.raw.insert(self.index, temp_factor)
def handle_factor_lit(self):
if DEBUG_LEVEL > 1.5:
print("DEBUG: handling factor lit")
print("DEBUG: factor index = " + str(self.index))
print("DEBUG: raw = ")
for f in self.raw:
print(str(f))
if not self.current_factor:
throw_error("Assertion error: current <factor> is None while parsing <literal>")
if not is_literal(self.raw[self.index]):
throw_error("Expected <literal> while parsing <factor>")
else:
self.current_factor.set_literal(self.raw.pop(self.index))
self.handle_factor_rest()
def handle_factor_new(self):
if DEBUG_LEVEL > 1.5:
print("DEBUG: handling factor new")
print("DEBUG: factor index = " + str(self.index))
print("DEBUG: raw = ")
for f in self.raw:
print(str(f))
self.current_factor.is_new = True
if not self.current_factor:
throw_error("Assertion error: current <factor> is None while parsing new")
if self.current_factor.id is None and self.current_factor.factor_type is None:
if is_id(self.raw[self.index]):
self.current_factor.set_id(self.raw.pop(self.index))
self.handle_factor_new()
elif is_type(self.raw[self.index]):
self.current_factor.set_factor_type(self.raw.pop(self.index))
self.handle_factor_new()
else:
throw_error("Syntax error: expected <id> or <type> while parsing <factor>")
elif self.current_factor.id is not None and is_type(self.raw[self.index]):
throw_error("TODO factor new <id> <<types>>...")
elif self.current_factor.factor_type is not None and '[' == self.raw[self.index]:
self.raw.pop(self.index)
new_exp_end = self.raw.index(']') if ']' in self.raw else -1 # guard: list.index raises rather than returning -1
if new_exp_end == -1:
throw_error("Syntax error while parsing [ <exp> ] of <factor>")
else:
self.raw.pop(new_exp_end)
new_exp = self.raw[self.index : new_exp_end]
exp = self.handle_bracket_exp(new_exp)
self.current_factor.add_exp(exp)
self.handle_factor_rest()
elif '(' in self.raw[self.index]:
# fixme this doesn't handle recursive actuals
self.raw.pop(self.index)
new_exp_end = self.raw.index(')') if ')' in self.raw else -1
if new_exp_end < 0 or new_exp_end > len(self.raw)-1:
throw_error("Syntax error while parsing <actuals> of <factor>")
else:
self.raw.pop(new_exp_end)
new_exp = self.raw[self.index : new_exp_end]
actuals = self.handle_actuals(new_exp)
if len(actuals):
for a in actuals:
self.current_factor.add_actual(a)
self.handle_factor_rest()
else:
throw_error("Syntax error while parsing <factor>")
def handle_actuals(self, new_exp):
if DEBUG_LEVEL > 1.5:
print("DEBUG: handling actuals")
# exp, exp, exp, exp
actuals = []
current_exp = None
for token in new_exp:
if ',' in token:
if ',' is token:
if current_exp is not None:
current_exp.compile()
actuals.append(current_exp)
else:
throw_error("Syntax error while parsing <actuals>")
else:
throw_error("Not enough whitespace while parsing <actuals> of <factor>")
else:
if current_exp is None:
current_exp = Exp()
current_exp.raw_append(token)
if current_exp is not None:
current_exp.compile()
actuals.append(current_exp)
if actuals == []: # must be at least one exp
actuals.append(Exp())
return actuals
def handle_factor_exp(self):
if DEBUG_LEVEL > 1.5:
print("DEBUG: handling factor exp")
print("DEBUG: factor index = " + str(self.index))
print("DEBUG: raw = ")
for f in self.raw:
print(str(f))
if not self.current_factor:
throw_error("Assertion error: current <factor> is None while parsing <exp>")
# assume ( popped, exp next
new_exp_end = self.raw.index(')') if ')' in self.raw else -1
if new_exp_end == -1:
throw_error("Syntax error while parsing <exp> of <factor>")
else:
self.raw.pop(new_exp_end)
new_exp = self.raw[self.index : new_exp_end]
exp = self.handle_paren_exp(new_exp)
self.current_factor.add_exp(exp)
self.handle_factor_rest()
def handle_factor_internal_exp(self, new_exp, end):
if DEBUG_LEVEL > 1.5:
print("DEBUG: handling factor internal exp")
current_exp = None
for token in new_exp:
if end == token:
if current_exp:
current_exp.compile()
return current_exp
else:
if current_exp is None:
current_exp = Exp()
current_exp.raw_append(token)
throw_error("Reached end of <exp> without encountering closing token while parsing <factor>")
def handle_paren_exp(self, new_exp):
return self.handle_factor_internal_exp(new_exp, ')')
def handle_bracket_exp(self, new_exp):
return self.handle_factor_internal_exp(new_exp, ']')
def handle_factor_block(self, new_exp):
throw_error("Parser not defined for syntax <factor-block>") # TODO
def handle_factor_formals(self, new_exp):
if DEBUG_LEVEL > 1.5:
print("DEBUG: handling factor formals")
formals = []
current_formal = Formal()
for token in new_exp:
if current_formal.type is None:
if is_type(token):
current_formal.set_type(token)
else:
throw_error("Encountered " + str(token) + " while expecting <type> for <formal> for <factor>")
elif current_formal.id is None:
if is_id(token):
current_formal.set_id(token)
else:
throw_error("Encountered " + str(token) + " while expecting <id> for <formal> for <factor>")
if current_formal.type is not None and current_formal.id is not None:
formals.append(current_formal)
current_formal = Formal()
return formals
def handle_factor_lam(self):
if DEBUG_LEVEL > 1.5:
print("DEBUG: handling factor lam")
print("DEBUG: factor index = " + str(self.index))
print("DEBUG: raw = ")
for f in self.raw:
print(str(f))
if not self.current_factor:
throw_error("Assertion error: current <factor> is None while parsing lambda")
if '(' == self.raw[self.index]: # assume whitespace added
# formals
self.raw.pop(self.index)
new_exp_end = self.raw.index(')') if ')' in self.raw else -1
if new_exp_end == -1:
throw_error("Syntax error while parsing <formals> of <factor>")
else:
self.raw.pop(new_exp_end)
new_exp = self.raw[self.index : new_exp_end]
formals = self.handle_factor_formals(new_exp)
if len(formals):
for f in formals:
self.current_factor.add_formal(f)
elif ':' == self.raw[self.index]:
self.raw.pop(self.index)
ret = self.raw.pop(self.index)
if not is_rtype(ret):
throw_error("Invalid <rtype> while parsing <factor>: " + str(ret))
else:
self.current_factor.set_rtype(ret)
elif '{' == self.raw[self.index]:
# block
self.raw.pop(self.index)
block_end = self.raw.index('}') if '}' in self.raw else -1
if block_end == -1:
throw_error("Syntax error while parsing <block> of <factor>")
else:
self.raw.pop(block_end)
block_exp = self.raw[self.index : block_end]
block = self.handle_factor_block(block_exp)
self.current_factor.add_block(block)
self.handle_factor_rest()
else:
throw_error("Syntax error while parsing <factor>")
def handle_factor_id(self):
if DEBUG_LEVEL > 1.5:
print("DEBUG: handling factor id")
print("DEBUG: factor index = " + str(self.index))
print("DEBUG: raw = ")
for f in self.raw:
print(str(f))
if not self.current_factor:
throw_error("Assertion error: current <factor> is None while parsing <id>")
token = self.raw.pop(self.index)
if not is_id(token):
throw_error("Encountered " + str(token) + " while expecting an <id> for <factor>")
else:
self.current_factor.set_id(token)
self.handle_factor_rest()
def handle_factor_rest(self):
if DEBUG_LEVEL > 1.5:
print("DEBUG: handling factor rest")
print("DEBUG: factor index = " + str(self.index))
print("DEBUG: raw = ")
for f in self.raw:
print(str(f))
if not self.current_factor:
throw_error("Assertion error: current <factor> is None while parsing <factor-rest>")
specials = ['(', '.', '[']
handled = False
if DEBUG_LEVEL > 2.5 and not illegal:
print("DEBUG: ------- waiting for ready (press enter to continue)")
raw_input()
self.current_factor.factor_rest = FactorRest()
self.current_factor.factor_rest.parent_factor = self.current_factor
self.current_factor = self.current_factor.factor_rest
try:
if len(self.raw) > self.index and self.raw[self.index].__class__.__name__ == "str":
token = self.raw[self.index]
for s in specials:
if s in token:
if s != token:
new_tokens = read_tight_code(self.raw.pop(self.index),internal=True, dot=True)
for n in reversed(new_tokens):
self.raw.insert(self.index, n)
self.handle_factor_rest()
return
else:
if s == '(':
handled = True
self.raw.pop(self.index)
paren_count = 0
new_exp_end = -1
for i in range(self.index, len(self.raw)):
if self.raw[i] == '(':
paren_count += 1
elif self.raw[i] == ')' and paren_count > 0:
paren_count -= 1
elif self.raw[i] == ')' and paren_count == 0:
new_exp_end = i
break
if new_exp_end < 0 or new_exp_end > len(self.raw)-1:
throw_error("Syntax error while parsing <actuals> of <factor>")
else:
self.raw.pop(new_exp_end)
new_exp = []
for _ in range(self.index, new_exp_end):
new_exp.append(self.raw.pop(self.index))
actuals = self.handle_actuals(new_exp)
if len(actuals):
for a in actuals:
self.current_factor.add_actual(a)
self.handle_factor_rest()
return
elif s == '.':
handled = True
self.raw.pop(self.index)
self.handle_factor_id()
elif s == '[':
handled = True
self.raw.pop(self.index)
self.handle_factor_exp()
else:
throw_error("Syntax error while parsing <factor-rest>")
except Exception as e:
tr = traceback.extract_stack()[1:-1]
throw_error("Syntax error while parsing <factor-rest>", trace=tr)
if DEBUG_LEVEL > 0.5:
print("DEBUG: Exception: " + str(e))
print("DEBUG: Factor=" + str(self.current_factor))
print("DEBUG: raw: " + str(self.raw))
print("DEBUG: index: " + str(self.index))
if not handled:
# factor done
if DEBUG_LEVEL > 2:
print("DEBUG: Adding factor rest to : " + str(self.raw))
self.current_factor.set_valid(True)
while self.current_factor.parent_factor is not None:
self.current_factor = self.current_factor.parent_factor
self.raw.insert(self.index, self.current_factor)
self.current_factor = None
def assert_class(self, token, class_name):
t = token.__class__.__name__
if t != class_name:
throw_error("Syntax error while parsing <factor>", addl="Expected " + str(t) + " to be " + class_name)
# deprecated?
def clean_raw(self):
for r in range(len(self.raw)):
if self.raw[r].__class__.__name__ == "NoneType":
self.raw.pop(r)
# Given raw string of <exp>, construct the abstract representations that compose it
def compile(self):
try:
self.make_factors() # convert all pieces into factors and ops
allowed_iterations = 100 # don't handle recursion past this much
if DEBUG_LEVEL > 1.5:
print("DEBUG: made factors")
print("DEBUG: factor index = " + str(self.index))
print("DEBUG: raw = ")
for f in self.raw:
print("> " + str(f))
self.clean_raw()
while self.has_op(MULOPS) and (allowed_iterations > 0):
allowed_iterations -= 1
for i in range(len(self.raw)-1):
if self.raw[i] in MULOPS:
term = Term()
term.set_op(self.raw[i])
term.right = self.raw.pop(i+1)
self.assert_class(term.right, "Factor")
term.left = self.raw[i-1]
self.assert_class(term.left, "Factor")
self.raw[i-1] = term
self.raw.pop(i) # get rid of op
self.clean_raw()
# Convert remaining Factors into Simples
for i in range(len(self.raw)):
if self.raw[i].__class__.__name__ == "Factor":
old = self.raw.pop(i)
new = Simple()
new.set_left(old)
self.raw.insert(i, new)
while self.has_op(ADDOPS) and (allowed_iterations > 0):
allowed_iterations -= 1
for i in range(len(self.raw)-1):
if self.raw[i] in ADDOPS:
simple = Simple()
simple.set_op(self.raw[i])
simple.right = self.raw.pop(i+1)
self.assert_class(simple.right, "Simple")
simple.left = self.raw[i-1]
self.assert_class(simple.left, "Simple")
self.raw[i-1] = simple
self.raw.pop(i) # get rid of op
self.clean_raw()
# Convert remaining Simples into Conjuncts
for i in range(len(self.raw)):
if self.raw[i].__class__.__name__ == "Simple":
old = self.raw.pop(i)
new = Conjunct()
new.set_left(old)
self.raw.insert(i, new)
while self.has_op(RELOPS) and (allowed_iterations > 0):
allowed_iterations -= 1
for i in range(len(self.raw)-1):
if self.raw[i] in RELOPS:
conjunct = Conjunct()
conjunct.set_op(self.raw[i])
conjunct.right = self.raw.pop(i+1)
self.assert_class(conjunct.right, "Conjunct")
conjunct.left = self.raw[i-1]
self.assert_class(conjunct.left, "Conjunct")
self.raw[i-1] = conjunct
self.raw.pop(i) # get rid of op
self.clean_raw()
# Convert remaining Conjuncts into Disjuncts
for i in range(len(self.raw)):
if self.raw[i].__class__.__name__ == "Conjunct":
old = self.raw.pop(i)
new = Disjunct()
new.set_left(old)
self.raw.insert(i, new)
while self.has_op(ANDOPS) and (allowed_iterations > 0):
allowed_iterations -= 1
for i in range(len(self.raw)-1):
if self.raw[i] in ANDOPS:
disjunct = Disjunct()
disjunct.set_op(self.raw[i])
disjunct.right = self.raw.pop(i+1)
self.assert_class(disjunct.right, "Disjunct")
disjunct.left = self.raw[i-1]
self.assert_class(disjunct.left, "Disjunct")
self.raw[i-1] = disjunct
self.raw.pop(i) # get rid of op
self.clean_raw()
# Convert remaining Disjuncts into LHSs
for i in range(len(self.raw)):
if self.raw[i].__class__.__name__ == "Disjunct":
old = self.raw.pop(i)
new = LHS()
new.set_left(old)
self.raw.insert(i, new)
while self.has_op(OROPS) and (allowed_iterations > 0):
allowed_iterations -= 1
for i in range(len(self.raw)-1):
if self.raw[i] in OROPS:
lhs = LHS()
lhs.set_op(self.raw[i])
lhs.right = self.raw.pop(i+1)
self.assert_class(lhs.right, "LHS")
lhs.left = self.raw[i-1]
self.assert_class(lhs.left, "LHS")
self.raw[i-1] = lhs
self.raw.pop(i) # get rid of op
self.clean_raw()
# Convert remaining LHS into Exp
for i in range(len(self.raw)):
if self.raw[i].__class__.__name__ == "LHS":
old = self.raw.pop(i)
new = Exp()
new.left = old
self.raw.insert(i, new)
while self.has_op(ASSIGNOPS) and (allowed_iterations > 0):
allowed_iterations -= 1
for i in range(len(self.raw)-1):
if self.raw[i] in ASSIGNOPS:
exp = Exp()
exp.op = self.raw[i]
exp.right = self.raw.pop(i+1)
self.assert_class(exp.right, "Exp")
exp.left = self.raw[i-1]
self.assert_class(exp.left, "Exp")
self.raw[i-1] = exp
self.raw.pop(i) # get rid of op
self.clean_raw()
for r in self.raw:
if r in OPS:
throw_error("Syntax error in <exp>", addl="Misplaced " + str(r))
# assert there is only one exp
if len(self.raw) > 1 or len(self.raw) < 1:
if DEBUG_LEVEL > 1.5:
print("DEBUG: exp error")
print("DEBUG: raw = ")
for f in self.raw:
print("> " + str(f))
throw_error("Parser error handling <exp>", addl="expected exactly 1 <exp>, got " + str(len(self.raw)) + ": " + str(self.raw))
except Exception as e:
throw_error("Syntax error while parsing <exp>")
if DEBUG_LEVEL > 0.5:
print("DEBUG: Exception: " + str(e))
def __str__(self):
return recursive_ast_to_string(self, "", 0)
class Factor():
def __init__(self):
self.type = "Factor"
self.id = None
self.unop = None
self.subfactor = None
self.valid = False
self.literal = None
self.formals = []
self.actuals = []
self.block = None
self.rtype = None
self.exp = None
self.types = None
self.factor_type = None
self.factor_rest = None
self.parent_factor = None
self.is_new = False
def set_valid(self, v):
self.valid = v
def set_factor_type(self, t):
self.factor_type = t
def set_unop(self, u):
self.unop = u
def set_id(self, i):
self.id = i
def add_exp(self, e):
self.exp = e
def set_rtype(self, r):
self.rtype = r
def set_block(self, b):
self.block = b
def add_formal(self, f):
self.formals.append(f)
def add_actual(self, a):
self.actuals.append(a)
def set_subfactor(self, s):
self.subfactor = s
def set_literal(self, l):
self.literal = l
def __str__(self):
return recursive_ast_to_string(self, "", 0)
class FactorRest(Factor):
def __init__(self):
Factor.__init__(self)
class ExpPiece:
def __init__(self):
self.left = None
self.op = None
self.right = None
self.type = None
def set_left(self, l):
self.left = l
def set_op(self, o):
self.op = o
def set_right(self, r):
self.right = r
def __str__(self):
return recursive_ast_to_string(self, "", 0)
class LHS(ExpPiece):
def __init__(self):
ExpPiece.__init__(self)
self.type = "LHS"
class Disjunct(ExpPiece):
def __init__(self):
ExpPiece.__init__(self)
self.type = "Disjunct"
class Conjunct(ExpPiece):
def __init__(self):
ExpPiece.__init__(self)
self.type = "Conjunct"
class Simple(ExpPiece):
def __init__(self):
ExpPiece.__init__(self)
self.type = "Simple"
class Term(ExpPiece):
def __init__(self):
ExpPiece.__init__(self)
self.type = "Term"
############################
######### MAIN #############
############################
def run(input, output):
import os
global line_count
global illegal
with open(input, 'r') as file:
line = file.readline()
while (line):
line_count += 1
line = tokenize_line(line)
if DEBUG_LEVEL > 2.5 and not illegal:
print("DEBUG: ------- waiting for ready (press enter to continue)")
raw_input()
line = file.readline()
with open(output, 'w') as file:
if illegal:
file.write("(illegal)")
else:
file.write(ast_to_string())
if illegal:
print("\nEncountered syntax error while parsing " + str(input) + ":")
print(error_msg)
if DEBUG_LEVEL > 1.5:
print(ast_to_string())
# convert line into tokens
# some trickery involved for strings that span multiple lines
# repeat parameter used if we need to re-tokenize
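# For illustration: tokens are split on whitespace, so a line like "int x = 5 ;" yields the
# tokens "int", "x", "=", "5", ";"; tokens written without spaces around punctuation
# (e.g. "(int)" or "foo(x)") are loosened later by read_tight_code.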
def tokenize_line(line, repeat=False):
global parsing_string
global current_token
if DEBUG_LEVEL > 0 and not illegal and not repeat:
if DEBUG_LEVEL > 1.5:
print("\n\n")
print("DEBUG: INPUT (line " + str(line_count) + "): " + line[:-1])
if repeat: # if re-tokenizing, stash current token
temp_token = current_token
current_token = ""
for c in line:
if c == "\"" and current_token != "":
# " marks start or end of token
if not parsing_string:
# new token
add_to_ast(current_token)
current_token = "" + c
parsing_string = True
else:
current_token += c
add_to_ast(current_token)
current_token = ""
parsing_string = False
elif ord(c) in WHITESPACE:
if parsing_string:
# write whitespace token as backslash escape readable char
if ord(c) == 0xa:
throw_error("Forbidden character: '\\n' in <stringliteral>")
elif ord(c) == 0xd:
throw_error("Forbidden character: '\\r' in <stringliteral>")
current_token += c
elif "function" in current_token:
pass # clearing whitespace but not destroying current token
elif current_token != "":
# wrap up and send token
token = current_token
current_token = ""
if token.startswith("//"): # comment, skip the rest of line
current_token = "" #redundant?
break
elif token.startswith("\""):
parsing_string = True
elif token.startswith("\"") and token.endswith("\"") and len(token) > 1: #deprecated?
add_to_ast(token)
current_token = ""
parsing_string = False
else:
add_to_ast(token)
current_token = ""
else:
pass # just clearing whitespace
elif is_valid_char(c) or c == "\"" or c == "\'":
current_token += c
if not parsing_string and "function" in current_token and current_token.count(')') == 2: # type hack
# wrap up and send token
add_to_ast(current_token)
current_token = ""
if current_token.startswith("\""):
parsing_string = True
if current_token.startswith("\"") and current_token.endswith("\"") and len(current_token) > 1: #deprecated?
add_to_ast(current_token)
current_token = ""
parsing_string = False
else:
throw_error("Forbidden character: \'" + str(c) + "\'")
break
if current_token and not current_token.startswith("//") and not parsing_string:
add_to_ast(current_token)
current_token = ""
if current_token.startswith("//"): # end of comment line
current_token = ""
if repeat:
current_token = temp_token
############################
####### PRINT TO AST #######
############################
def ast_to_string():
return setup_ast_to_string(protocols, classes, stms) + "\n"
def setup_ast_to_string(protocols, classes, stms):
if expecting[0] != "<end>":
throw_error("Error: reached end of program while expecting " + expecting[0])
out = ""
out += "(program ("
indent = 1
for p in protocols:
out = recursive_ast_to_string(p, out, indent)
out += ") ("
for c in classes:
out = recursive_ast_to_string(c, out, indent)
out += ")"
for s in stms:
out = recursive_ast_to_string(s, out, indent)
out += ")"
if DEBUG_LEVEL > 0.5 and not illegal:
print("DEBUG: OUTPUT: ")
print(out)
return out
# recursively parse an object, adding to out str which gets returned
# continuation parameter used to suppress newline + indentation + (
def recursive_ast_to_string(obj, out, indent_level, continuation=False):
if DEBUG_LEVEL > 2:
print("DEBUG: " + " " * indent_level + " PRINTING " + obj.__class__.__name__)
if DEBUG_LEVEL > 2.5:
print("DEBUG: ------- waiting for ready (press enter to continue)")
raw_input()
if not continuation:
out += "\n" + INDENTATION * indent_level + "("
if obj.__class__.__name__ == "Protocol":
out += "protoDec " + str(obj.typeid) + " ("
for tvar in obj.typevars:
out += str(tvar) + " "
out += ") ("
for t in obj.extends:
out = recursive_ast_to_string(t, out, indent_level + 1)
out += ") ("
for fp in obj.funprotos:
if fp == obj.funprotos[0]: # making format look like example
out = recursive_ast_to_string(fp, out, indent_level + 1)
else:
out = recursive_ast_to_string(fp, out, indent_level + 1)
out += ")"
elif obj.__class__.__name__ == "Class":
out += "classDec " + str(obj.id) + " ( "
for tvar in obj.typevars:
out += str(tvar) + " "
out += ")("
for i in obj.implements:
out = recursive_ast_to_string(i, out, indent_level + 1)
out += ")(init ("
for formal in obj.init_formals:
out = recursive_ast_to_string(formal, out, indent_level + 1)
out += ") "
out = recursive_ast_to_string(obj.block, out, indent_level + 1)
out += ")("
for bd in obj.bodydecs:
out = recursive_ast_to_string(bd, out, indent_level + 1)
out += ")"
elif obj.__class__.__name__ == "Funproto":
out += "funProto " + str(obj.id) + " ("
for tvar in obj.typevars:
out += str(tvar) + " "
out += ") ("
for formal in obj.formals:
out = recursive_ast_to_string(formal, out, indent_level + 1)
if formal != obj.formals[-1]: # make formatting look like example
out += " "
out += ") "
if obj.rtype:
out += " (" + str(obj.rtype) + ")"
else:
out += "(void) "
elif obj.__class__.__name__ == "Fundec":
out += "funDec " + str(obj.id) + " ( "
for tvar in obj.typevars:
out += str(tvar) + " "
out += ")("
for formal in obj.formals:
out = recursive_ast_to_string(formal, out, indent_level + 1)
out += ")"
if obj.rtype:
out += " (" + str(obj.rtype) + ")"
else:
out += "(void) "
out = recursive_ast_to_string(obj.block, out, indent_level + 1)
elif obj.__class__.__name__ == "Typeapp":
if obj.typeid:
out += "typeApp " + str(obj.typeid) + "()" # TODO plus <<types>>
else: # obj.tvar:
out += "typeApp " + str(obj.tvar)
elif obj.__class__.__name__ == "Formal":
out += "formal (" + str(obj.type) + ") " + str(obj.id)
elif obj.__class__.__name__ == "Block":
out += "block ( "
for dec in obj.local_decs:
out = recursive_ast_to_string(dec, out, indent_level + 1)
out += ") ("
for stm in obj.stms:
out = recursive_ast_to_string(stm, out, indent_level + 1)
out += ")"
elif obj.__class__.__name__ == "Stm":
if obj.style == "empty":
out += "skip"
elif obj.style == "exp":
if len(obj.exps) != 1:
throw_error("Parser error: expected <expStm> to have exactly 1 <exp>, has " + str(len(obj.exps)))
out += "expStm "
out = recursive_ast_to_string(obj.exps[0], out, indent_level + 1, continuation=True)
elif obj.style == "if":
if len(obj.exps) != 1 or len(obj.stms) != 2:
throw_error("Parser error: expected <ifStm> to have exactly 1 <exp> and 2 <stm>")
out += "("
out = recursive_ast_to_string(obj.exps[0], out, indent_level + 1,continuation=True)
out += " "
out = recursive_ast_to_string(obj.stms[0], out, indent_level + 1,continuation=True)
out += " "
out = recursive_ast_to_string(obj.stms[1], out, indent_level + 1,continuation=True)
elif obj.style == "while":
if len(obj.exps) != 1 or len(obj.stms) != 1:
throw_error("Parser error: expected <whileStm> to have exactly 1 <exp> and 1 <stm>")
out += "while "
out = recursive_ast_to_string(obj.exps[0], out, indent_level + 1,continuation=True)
out = recursive_ast_to_string(obj.stms[0], out, indent_level + 1,continuation=True)
elif obj.style == "for":
if len(obj.exps) != 2 or len(obj.stms) != 1:
throw_error("Parser error: expected <forStm> to have exactly 2 <exp> and 1 <stm>")
out = recursive_ast_to_string(obj.vardec, out, indent_level + 1,continuation=True)
new_stm = obj
new_stm.set_style("while")
increment_exp = new_stm.exps.pop(1)
increment_stm = Stm()
increment_stm.set_style("exp")
increment_stm.add_exp(increment_exp)
new_stm.add_stm(increment_stm)
out = recursive_ast_to_string(new_stm, out, indent_level + 1, continuation=True)
elif obj.style == "return0":
out += "return0"
elif obj.style == "return":
if len(obj.exps):
out += "return "
out = recursive_ast_to_string(obj.exps[0], out, indent_level + 1, continuation=True)
else:
out += "return0" #redundant failure catch?
elif obj.style == "block":
out = out[:-1] # skip extra set of parens
out = recursive_ast_to_string(obj.block, out, indent_level + 1)
out = out[:-1] # skip extra set of parens
elif obj.style == "halt":
if len(obj.exps) != 1:
throw_error("Parser error: expected <haltStm> to have exactly 1 <exp>")
out += "(halt "
out = recursive_ast_to_string(obj.exps[0], out, indent_level + 1, continuation=True)
else:
throw_error("Parser error: Stm with no specified style")
elif obj.__class__.__name__ == "Constdec":
out += "constant " + str(obj.type) + " " + str(obj.id) + " " + str(obj.lit)
elif obj.__class__.__name__ == "Vardec":
out += "varDec "
if is_typeid(obj.type):
ta = Typeapp()
ta.typeid = obj.type
out = recursive_ast_to_string(ta, out, indent_level + 1)
else:
out += str(obj.type)
out += " " + str(obj.id) + " "
out = recursive_ast_to_string(obj.exp, out, indent_level + 1, continuation=True)
elif obj.__class__.__name__ == "Globaldec":
out += "static " + str(obj.type) + " " + str(obj.id) + " " + str(obj.lit)
elif obj.__class__.__name__ == "Fielddec":
out += "fieldDec (formal " + str(obj.type) + " " + str(obj.id)
elif obj.__class__.__name__ == "Exp":
for r in obj.raw:
out = recursive_ast_to_string(r, out, indent_level + 1, continuation=True)
elif obj.__class__.__name__ == "LHS":
if obj.op is not None:
if obj.op not in ASSIGNOPS:
throw_error("Parser error: expected <lhs> to use <assignop>")
else:
out += "(assign "
out = recursive_ast_to_string(obj.left, out, indent_level + 1, continuation=True)
out = recursive_ast_to_string(obj.right, out, indent_level + 1, continuation=True)
else:
out = recursive_ast_to_string(obj.left, out, indent_level + 1)
elif obj.__class__.__name__ == "Disjunct":
if obj.op is not None:
out += "(binOpn "
out += obj.op + " "
out = recursive_ast_to_string(obj.left, out, indent_level + 1, continuation=True)
out = recursive_ast_to_string(obj.right, out, indent_level + 1, continuation=True)
else:
out = recursive_ast_to_string(obj.left, out, indent_level + 1, continuation=True)
elif obj.__class__.__name__ == "Conjunct":
if obj.op is not None:
out += "(binOpn "
out += obj.op + " "
out = recursive_ast_to_string(obj.left, out, indent_level + 1, continuation=True)
out = recursive_ast_to_string(obj.right, out, indent_level + 1, continuation=True)
else:
out = recursive_ast_to_string(obj.left, out, indent_level + 1, continuation=True)
elif obj.__class__.__name__ == "Simple":
if obj.op is not None:
out += "(binOpn "
out += obj.op + " "
out = recursive_ast_to_string(obj.left, out, indent_level + 1, continuation=True)
out = recursive_ast_to_string(obj.right, out, indent_level + 1, continuation=True)
else:
out = recursive_ast_to_string(obj.left, out, indent_level + 1, continuation=True)
elif obj.__class__.__name__ == "Term":
if obj.op is not None:
out += "(binOpn "
out += obj.op + " "
out = recursive_ast_to_string(obj.left, out, indent_level + 1, continuation=True)
out = recursive_ast_to_string(obj.right, out, indent_level + 1, continuation=True)
else:
out = recursive_ast_to_string(obj.left, out, indent_level + 1, continuation=True)
elif obj.__class__.__name__ == "Factor":
if obj.factor_rest:
out = recursive_ast_to_string(obj.factor_rest, out, indent_level + 1, continuation=True)
else:
if obj.unop is not None:
out += "(unOpn "
out += obj.unop + " "
out = recursive_ast_to_string(obj.subfactor, out, indent_level + 1, continuation=True)
elif obj.literal is not None:
out = recursive_ast_to_string(obj.literal, out, indent_level + 1, continuation=True)
elif obj.formals != []:
out += "(lambda ("
for formal in obj.formals:
out = recursive_ast_to_string(formal, out, indent_level + 1, continuation=True)
out += ")"
if obj.rtype:
out += "(" + obj.rtype + ")"
else:
out += "(void)"
out = recursive_ast_to_string(obj.block, out, indent_level + 1, continuation=True)
elif obj.factor_type is not None:
out += "newObject (classApp " + obj.id + " ( "
throw_error("Parser error, undefined handling of newObject <factor> <<types>>")
elif obj.is_new:
out += "newObject " + obj.id + " ( "
if obj.exp:
out = recursive_ast_to_string(obj.exp, out, indent_level + 1, continuation=True)
out += ")"
else: # call of something else
if obj.id:
out += obj.id + " " #+ " ( " # fixme?
else:
throw_error("Parser error, undefined <factor>")
elif obj.__class__.__name__ == "FactorRest":
if obj.factor_rest:
out = recursive_ast_to_string(obj.factor_rest, out, indent_level + 1, continuation=True)
else:
factor = copy.copy(obj.parent_factor)
factor.factor_rest = None # marking that we already made note of the factor-rest
if obj.actuals != []:
out += "call "
out = recursive_ast_to_string(factor, out, indent_level + 1, continuation=True)
out += "("
for actual in obj.actuals:
out = recursive_ast_to_string(actual, out, indent_level + 1, continuation=True)
out += ")"
elif obj.id != None:
out += "(dot "
out += obj.id + " "
out = recursive_ast_to_string(factor, out, indent_level + 1, continuation=True)
out += ")"
elif obj.exp != None:
out += "(aref "
out = recursive_ast_to_string(factor, out, indent_level + 1, continuation=True)
out = recursive_ast_to_string(obj.exp, out, indent_level + 1, continuation=True)
out += ")"
else:
out += "("
out = recursive_ast_to_string(factor, out, indent_level + 1, continuation=True)
out += ")"
elif obj.__class__.__name__ == "str":
# literal
if obj == "null" or obj == "true" or obj == "false":
out += "(" + obj + ")"
elif is_charliteral(obj):
out += "(charLiteral " + obj
elif is_stringliteral(obj):
out += "(stringLiteral " + obj
elif is_intliteral(obj):
out += "(intLiteral " + obj
elif is_floatliteral(obj):
out += "(floatLiteral " + obj
else:
throw_error("Parser error, unknown literal")
else:
throw_error("Parser error while writing " + obj.__class__.__name__)
if not continuation:
out += ")"
if DEBUG_LEVEL > 2.5:
print("DEBUG: JUST PRINTED " + obj.__class__.__name__)
print("DEBUG: CURRENT OUTPUT: ")
print(out)
print("DEBUG: ------- waiting for ready (press enter to continue)")
raw_input()
return out
############################
#### GENERAL HELPERS #######
############################
def stacktrace(trace=None):
if DEBUG_LEVEL > 0.5:
print("\nDEBUG: Error traceback:")
if not trace:
trace = traceback.extract_stack()[1:-1]
for l in trace:
print("------ " + str(l))
print("Expecting: " + str(expecting))
print("Current object type: " + str(current_obj_type))
print("Current object: " + str(current_obj))
def throw_error(reason, addl="", trace=None):
global line_count
global illegal
global error_msg
if illegal and error_msg: # already have an error
return
illegal = True
error_line = line_count
error_msg = reason + " in line " + str(error_line)
if addl:
error_msg += "\n" + addl
stacktrace(trace)
# if current obj handling is None, pop from stack
def check_current_obj():
global current_obj
global current_obj_type
global object_stack
global object_type_stack
if current_obj == None:
if len(object_stack) > 0:
if object_stack[0] != None:
# pop object stack
current_obj = object_stack[0]
current_obj_type = object_type_stack[0]
object_stack = object_stack[1:]
object_type_stack = object_type_stack[1:]
if DEBUG_LEVEL > 0:
print("Object stack popped. Current obj = " + current_obj_type)
print("Remaining obj stack = " + str(object_stack_type))
# Find the handler for the token, a handler handler
def add_to_ast(token):
global expecting
if illegal:
return
if DEBUG_LEVEL > 1:
print("DEBUG: Tokenizing <" + token + "> while expecting " + expecting[0])
if DEBUG_LEVEL > 1.5:
print("DEBUG: Expecting: " + str(expecting))
print("DEBUG: Current obj: " + str(current_obj_type))
print("DEBUG: Obj stack: " + str(object_stack))
check_current_obj()
if expecting[0] == "<end>":
throw_error("Encountered token <" + token + "> while expecting end of program")
try:
handler = TOKEN_TO_HANDLER[expecting[0]]
if DEBUG_LEVEL > 1:
print("DEBUG: Token <" + token + "> sent to " + str(handler))
if DEBUG_LEVEL > 1.5:
print("DEBUG: EXPECTING: " + str(expecting))
handler(token)
except KeyError as e:
print("\nParser error: No handler for token " + token + " while expecting " + expecting[0])
stacktrace()
exit(1)
# handle tokens that have less whitespace than we hope for, such as (int) or :void
# puts space between all special characters
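# For illustration: read_tight_code("(int)") loosens it to " ( int ) " before re-tokenizing,
# and with dot=True "foo.bar" is loosened to "foo . bar".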
def read_tight_code(token, internal=False, dot=False, angle=False):
if "function" in token:
throw_error("Parser error, trying to split a function type")
if is_stringliteral(token):
space = "\x20"
new_line = token.replace(" ", space)
return new_line
tight_tokens = ['(', ')', '{', '}', ',', ':', ';']
if dot:
tight_tokens.append('.')
if angle:
tight_tokens.append('<')
tight_tokens.append('>')
new_line = token
for t in tight_tokens:
new_line = new_line.replace(t, " " + t + " ")
if DEBUG_LEVEL > 1:
print("DEBUG: \'" + token + "\' loosened to \'" + new_line + "\'")
if not internal:
tokenize_line(new_line, repeat=True)
else:
return(new_line.split())
def assert_obj_type(t):
#global current_obj_type
if t == current_obj_type:
return True
else:
throw_error("Parser Error: Encountered a " + t + " while expecting object of type " + str(current_obj_type))
return False
# push current object to stack
def push_stack():
global current_obj
global current_obj_type
global object_stack
global object_type_stack
object_stack.insert(0, current_obj)
object_type_stack.insert(0, current_obj_type)
current_obj = None
current_obj_type = None
# pop object from stack to current
def pop_stack():
global current_obj
global current_obj_type
global object_stack
global object_type_stack
if len(object_stack) > 0:
current_obj = object_stack[0]
current_obj_type = object_type_stack[0]
object_stack = object_stack[1:]
object_type_stack = object_type_stack[1:]
else:
current_obj = None
current_obj_type = None
############################
### TOKEN HANDLERS #########
############################
def handle_protodecs(token):
global expecting
global current_obj
global current_obj_type
global protocols
if token == "protocol":
expecting.insert(0, "<protodec>")
if current_obj:
push_stack()
current_obj = Protocol()
current_obj_type = "Protocol"
protocols.append(current_obj)
else:
# no more protodecs, find a new handler
expecting = expecting[1:]
add_to_ast(token)
def handle_protodec(token):
global expecting
global current_obj
global current_obj_type
if assert_obj_type("Protocol"):
# expect id first
if current_obj.typeid == None:
if is_id(token, proto=True):
current_obj.set_typeid(token)
add_typeid(token) # define new typeid
elif '<' in token:
read_tight_code(token, angle=True)
else:
throw_error("Encountered " + token + " while expecting a typeid for a <protodec>")
# tvars next, can be empty or Tvar or Tvar, Tvar, ... Tvar
elif '<' in token:
if '<' is token:
current_obj.open_tvars = True
current_obj.set_expecting(True)
else:
read_tight_code(token, angle=True)
elif current_obj.open_tvars:
if '>' in token:
if '>' is token:
if current_obj.expecting_more_vars:
throw_error("Syntax error in <protodec>", addl="Expecting another <typevar>")
else:
current_obj.open_tvars = False
else:
read_tight_code(token)
elif ',' == token: # expect another tvar
if current_obj.expecting_more_vars:
throw_error("Syntax error", addl="Too many commas in typevars?")
current_obj.set_expecting(True)
elif ',' in token: # comma is part of another token
read_tight_code(token)
elif is_tvar(token): # no comma
current_obj.add_typevar(token)
current_obj.set_expecting(False)
elif token == "extends":
if current_obj.expecting_more_vars:
throw_error("Syntax error", addl="Expecting another typevar")
else:
expecting.insert(0, "<extends-rest>")
elif '{' in token:
if current_obj.expecting_more_vars:
throw_error("Syntax error", addl="Expecting another typevar")
else:
if '{' == token:
expecting.insert(0, "<funprotos>")
else:
throw_error("Syntax error while parsing a <protodec>")
elif '}' in token:
if '}' is token:
expecting = expecting[1:]
if assert_obj_type("Protocol"):
pop_stack()
else:
throw_error("Parser error handling <protodec>")
else:
read_tight_code(token)
else:
throw_error("Encountered " + token + " while parsing a " + str(current_obj_type))
else:
throw_error("Protocol expected")
def handle_rtype(token):
global expecting
if ':' in token or ')' in token or ';' in token:
read_tight_code(token)
elif not is_rtype(token):
throw_error("Encountered " + token + " while expecting <rtype>")
else:
current_obj.set_rtype(token)
expecting = expecting[1:] # rtype finished
def handle_formals(token):
global expecting
global current_obj
global current_obj_type
if ')' in token and "function" not in token or ("function" in token and token.count(')') > 2): # type hack, messy
if ')' is token:
if expecting[0] == "<formals-rest>":
throw_error("Expecting another <formal>")
expecting = expecting[1:] # rest of formals is empty
else:
if "function" not in token:
read_tight_code(token)
elif ')' == token[-1]:
tokenize_line(')', repeat=True)
tokenize_line(token[:-1], repeat=True)
else:
throw_error("Parser error, unable to parse <formals>")
elif ',' in token:
if ',' is token:
expecting.insert(0, "<formals-rest>") # expect another formal
else:
read_tight_code(token)
elif is_type(token):
if current_obj_type == "Formal":
throw_error("Encountered type " + token \
+ " while parsing a <formal> that already had a type " + str(current_obj.type))
else:
if expecting[0] == "<formals-rest>":
expecting[0] = "<formal>"
else:
expecting.insert(0, "<formal>")
if current_obj:
push_stack()
current_obj = Formal()
current_obj_type = "Formal"
current_obj.set_type(token)
else:
throw_error("Encountered " + token + " while expecting a <type> for a <formal>")
def handle_formal(token):
global expecting
global current_obj
global current_obj_type
assert_obj_type("Formal")
if not is_id(token):
if ')' in token:
read_tight_code(token)
elif ',' in token:
if ',' is token:
throw_error("Encountered " + token + " while expecting an <id>")
else:
read_tight_code(token)
else:
throw_error("Encountered " + token + " while expecting an <id>")
else:
current_obj.set_id(token)
# add formal to its parent object
formal_obj = current_obj
pop_stack()
current_obj.add_formal(formal_obj)
expecting = expecting[1:] # formal finished
def handle_classdecs(token):
global expecting
global current_obj
global current_obj_type
global classes
if token == "class":
expecting.insert(0, "<classdec>")
if current_obj:
push_stack()
current_obj = Class()
current_obj_type = "Class"
classes.append(current_obj)
elif '}' == token:
expecting = expecting[1:] # rest of classdecs is empty
pop_stack()
else:
expecting = expecting[1:] # rest of classdecs is empty
add_to_ast(token)
def handle_classdec(token):
global expecting
global current_obj
global current_obj_type
if assert_obj_type("Class"):
# expect id first
if current_obj.id == None:
if is_id(token):
current_obj.set_id(token)
elif '<' in token:
read_tight_code(token, angle=True)
else:
throw_error("Encountered " + token + " while expecting an <id> for a <classdec>")
# tvars next, can be empty or Tvar or Tvar, Tvar, ... Tvar
elif '<' in token and not '(' in token:
if '<' is token:
current_obj.open_tvars = True
current_obj.set_expecting(True)
else:
read_tight_code(token, angle=True)
elif current_obj.open_tvars:
if '>' in token:
if '>' is token:
if current_obj.expecting_more_vars:
throw_error("Syntax error in <classdec>", addl="Expecting another <typevar>")
else:
current_obj.open_tvars = False
else:
read_tight_code(token)
elif ',' == token: # expect another tvar
if current_obj.expecting_more_vars:
throw_error("Syntax error", addl="Too many commas in typevars?")
current_obj.set_expecting(True)
elif ',' in token: # comma is part of another token
read_tight_code(token)
elif is_tvar(token): # no comma
current_obj.add_typevar(token)
current_obj.set_expecting(False)
elif token == "implements":
if current_obj.expecting_more_vars:
throw_error("Syntax error", addl="Expecting another typevar")
else:
expecting.insert(0, "<implements-rest>")
elif '{' in token:
if current_obj.expecting_more_vars:
throw_error("Syntax error", addl="Expecting another typevar")
elif len(current_obj.implements) < 1:
throw_error("Class " + current_obj.id + " must implement <typeapps>")
else:
if '{' == token:
expecting.insert(0, "<classbody>")
else:
throw_error("Syntax error while parsing a <classdec>")
elif '}' in token:
if '}' is token:
expecting = expecting[1:] # consume token, done with classdec
if assert_obj_type("Class"):
pop_stack()
else:
throw_error("Parser error handling <classdec>")
else:
read_tight_code(token)
else:
throw_error("Encountered " + token + " while parsing a " + str(current_obj_type))
else:
throw_error("Syntax error while parsing a <classdec>")
def handle_extends(token):
global expecting
global current_obj
global current_obj_type
if expecting[0] == "<extends-rest>":
if ',' in token:
if ',' == token:
throw_error("Syntax error while parsing <typeapps> of a <protodec>")
else:
read_tight_code(token)
else:
if is_typeapp(token):
current_obj.add_extends(token)
expecting[0] = "<extends>"
else:
throw_error("Encountered " + token + " while expecting a <typeapp> for Protocol " + current_obj.typeid)
elif expecting[0] == "<extends>":
# expecting ',' or end of implements here
if ',' in token:
if ',' == token:
expecting[0] = "<extends-rest>"
else:
read_tight_code(token)
else:
expecting = expecting[1:]
add_to_ast(token) # return to handler
def handle_implements(token):
global expecting
global current_obj
global current_obj_type
if expecting[0] == "<implements-rest>":
if ',' in token:
if ',' == token:
throw_error("Syntax error while parsing <typeapps> of a <classdec>")
else:
read_tight_code(token)
else:
if is_typeapp(token):
current_obj.add_implements(token)
expecting[0] = "<implements>"
else:
throw_error("Encountered " + token + " while expecting a <typeapp> for Class " + current_obj.id)
elif expecting[0] == "<implements>":
# expecting ',' or end of implements here
if ',' in token:
if ',' == token:
expecting[0] = "<implements-rest>"
else:
read_tight_code(token)
else:
expecting = expecting[1:]
add_to_ast(token) # return to handler
def handle_classbody(token):
global expecting
global current_obj
global current_obj_type
# already took care of {
# now expecting ( <formals> ) <block> <bodydecs> }
if not assert_obj_type("Class"):
throw_error("Encountered a <classbody> while not parsing a <classdec>")
else:
if '(' in token:
if '(' is token:
current_obj.found_formals()
expecting.insert(0, "<formals>")
else:
read_tight_code(token)
else:
if current_obj.expecting_formals:
throw_error("Expecting (<formals>) for <init> of <classbody>")
elif '{' in token:
if '{' is token:
expecting[0] = "<bodydecs-plus-bracket>"
expecting.insert(0, "<block>")
if current_obj:
push_stack()
current_obj = Block()
current_obj_type = "Block"
else:
read_tight_code(token)
# assume block handler adds the block to our obj
else:
throw_error("Syntax error while parsing <classbody>")
def handle_bodydecs_plus_bracket(token):
if token != '}':
throw_error("End of <block> expected before <bodydecs>")
else:
expecting[0] = "<bodydecs>"
# redirecter for figuring out which dec is next
def handle_bodydecs(token):
global expecting
global current_obj
global current_obj_type
# each dec will add itself to obj, return on }
if "constant" == token:
expecting.insert(0, "<constdec>")
if current_obj:
push_stack()
current_obj = Constdec()
current_obj_type = "Constdec"
elif "static" == token:
expecting.insert(0, "<globaldec>")
if current_obj:
push_stack()
current_obj = Globaldec()
current_obj_type = "Globaldec"
elif "fun" == token:
expecting.insert(0, "<fundec>")
if current_obj:
push_stack()
current_obj = Fundec()
current_obj_type = "Fundec"
elif is_type(token):
expecting.insert(0, "<fielddec>")
if current_obj:
push_stack()
current_obj = Fielddec()
current_obj_type = "Fielddec"
add_to_ast(token) # don't consume token
elif '}' in token:
if '}' is token:
expecting = expecting[2:] # done with class too
add_to_ast(token)
def handle_funprotos(token):
global expecting
global current_obj
global current_obj_type
if token == "fun":
expecting.insert(0, "<funproto>")
if current_obj:
push_stack()
current_obj = Funproto()
current_obj_type = "Funproto"
else:
expecting = expecting[1:] # rest of funprotos is empty
add_to_ast(token) # find a new handler
def handle_funproto(token):
global expecting
global current_obj
global current_obj_type
if not assert_obj_type("Funproto"):
throw_error("Funproto expected")
else:
if current_obj.id == None:
if is_id(token):
current_obj.set_id(token)
elif '<' in token:
read_tight_code(token, angle=True)
else:
throw_error("Encountered " + token + " while expecting an <id> for a <funproto>")
# tvars next, can be empty or Tvar or Tvar, Tvar, ... Tvar
elif '<' in token and not '(' in token:
if '<' is token:
current_obj.open_tvars = True
current_obj.set_expecting(True)
else:
read_tight_code(token, angle=True)
elif current_obj.open_tvars:
if '>' in token:
if '>' is token:
if current_obj.expecting_more_vars:
throw_error("Syntax error in <funproto>", addl="Expecting another <typevar>")
else:
current_obj.open_tvars = False
else:
read_tight_code(token)
elif ',' == token: # expect another tvar
if current_obj.expecting_more_vars:
throw_error("Syntax error", addl="Too many commas in typevars?")
current_obj.set_expecting(True)
elif ',' in token: # comma is part of another token
read_tight_code(token)
elif is_tvar(token): # no comma
current_obj.add_typevar(token)
current_obj.set_expecting(False)
elif '(' in token:
if current_obj.expecting_more_vars:
throw_error("Syntax error", addl="Expecting another typevar")
else:
if '(' == token:
#expecting = expecting[1:] # rest of funprotos is formals, possibly rtype
expecting.insert(0, "<formals>")
else:
if '(' == token[0]:
tokenize_line('(', repeat=True)
tokenize_line(token[1:], repeat=True)
else:
throw_error("Parser error, unable to parse <formals>")
elif ':' in token:
if ':' is token:
expecting.insert(0, "<rtype>")
else:
read_tight_code(token)
elif ';' is token:
expecting = expecting[1:] # end of fun proto
fp_obj = current_obj
pop_stack()
current_obj.add_funproto(fp_obj)
else:
throw_error("Syntax error in <funproto>", addl="Did you forget a semicolon or parenthesis?")
def handle_vardec(token):
global expecting
global current_obj
global current_obj_type
if not assert_obj_type("Vardec"):
throw_error("Vardec expected")
else:
if current_obj.type is None:
if not is_type(token):
throw_error("Encountered " + token + " while expecting a <type> for <vardec>")
else:
current_obj.set_type(token)
elif current_obj.id is None:
if not is_id(token):
throw_error(token + " is not a valid <id> for <vardec>")
else:
current_obj.set_id(token)
elif not current_obj.eq:
if token == ASSIGNOPS[0]:
current_obj.consume_eq()
else:
throw_error("Expecting <assignop> while parsing <vardec>")
elif current_obj.exp is None:
expecting.insert(0, "<exp-semi>")
add_to_ast(token)
else:
assert_obj_type("Vardec")
expecting = expecting[1:] # consume char, done
dec_obj = current_obj
pop_stack()
current_obj.add_dec(dec_obj)
add_to_ast(token) # return to handler
def handle_constdec(token):
# assume constant token was already consumed
global expecting
global current_obj
global current_obj_type
if not assert_obj_type("Constdec"):
throw_error("Constdec expected")
else:
if current_obj.type is None:
if not token in PRIMTYPES:
throw_error("Encountered " + token + " while expecting a <primtype> for <constdec>")
else:
current_obj.set_type(token)
elif current_obj.id is None:
if not is_id(token):
throw_error(token + " is not a valid <id> for <constdec>")
else:
current_obj.set_id(token)
elif not current_obj.eq:
if token == ASSIGNOPS[0]:
current_obj.consume_eq()
else:
throw_error("Expecting <assignop> while parsing <constdec>")
elif current_obj.lit is None:
if not is_literal(token):
throw_error(token + " is not a valid <literal> for <constdec>")
else:
current_obj.lit = token
elif ';' in token:
if ';' is token:
expecting = expecting[1:] # consume char, done
dec_obj = current_obj
pop_stack()
current_obj.add_dec(dec_obj)
else:
read_tight_code(token)
def handle_globaldec(token):
# assume static token was already consumed
global expecting
global current_obj
global current_obj_type
if not assert_obj_type("Globaldec"):
throw_error("Globaldec expected")
else:
if current_obj.type is None:
if not token in PRIMTYPES:
throw_error("Encountered " + token + " while expecting a <primtype> for <globaldec>")
else:
current_obj.set_type(token)
elif current_obj.id is None:
if not is_id(token):
throw_error(token + " is not a valid <id> for <globaldec>")
else:
current_obj.set_id(token)
elif not current_obj.eq:
if token == ASSIGNOPS[0]:
current_obj.consume_eq()
else:
throw_error("Expecting <assignop> while parsing <globaldec>")
elif current_obj.lit is None:
if not is_literal(token):
throw_error(token + " is not a valid <literal> for <globaldec>")
else:
current_obj.lit = token
elif ';' in token:
if ';' is token:
expecting = expecting[1:] # consume char, done
dec_obj = current_obj
pop_stack()
current_obj.add_dec(dec_obj)
else:
read_tight_code(token)
def handle_fielddec(token):
global expecting
global current_obj
global current_obj_type
if not assert_obj_type("Fielddec"):
throw_error("Fielddec expected")
else:
if current_obj.type is None:
if not is_type(token):
throw_error("Encountered " + token + " while expecting a <type> for <fielddec>")
else:
current_obj.set_type(token)
elif current_obj.id is None:
if not is_id(token):
throw_error(token + " is not a valid <id> for <fielddec>")
else:
current_obj.set_id(token)
elif ';' in token:
if ';' is token:
expecting = expecting[1:] # consume char, done
dec_obj = current_obj
pop_stack()
current_obj.add_dec(dec_obj)
else:
read_tight_code(token)
def handle_fundec(token):
global expecting
global current_obj
global current_obj_type
if not assert_obj_type("Fundec"):
throw_error("Fundec expected")
else:
if current_obj.id == None:
if is_id(token):
current_obj.set_id(token)
elif '<' in token:
read_tight_code(token, angle=True)
else:
throw_error("Encountered " + token + " while expecting an <id> for a <fundec>")
# tvars next, can be empty or Tvar or Tvar, Tvar, ... Tvar
elif '<' in token:
if '<' is token:
current_obj.open_tvars = True
current_obj.set_expecting(True)
else:
read_tight_code(token, angle=True)
elif current_obj.open_tvars:
if '>' in token:
if '>' is token:
if current_obj.expecting_more_vars:
throw_error("Syntax error in <fundec>", addl="Expecting another <typevar>")
else:
current_obj.open_tvars = False
else:
read_tight_code(token)
elif ',' == token: # expect another tvar
if current_obj.expecting_more_vars:
throw_error("Syntax error", addl="Too many commas in typevars?")
current_obj.set_expecting(True)
elif ',' in token: # comma is part of another token
read_tight_code(token)
elif is_tvar(token): # no comma
current_obj.add_typevar(token)
current_obj.set_expecting(False)
elif '(' in token:
if current_obj.expecting_more_vars:
throw_error("Syntax error", addl="Expecting another typevar")
else:
if '(' == token:
#expecting = expecting[1:] # rest of fundec is formals, possibly rtype
expecting.insert(0, "<formals>")
else:
read_tight_code(token)
elif ':' in token:
if ':' is token:
expecting.insert(0, "<rtype>")
else:
read_tight_code(token)
elif '{' in token:
if '{' is token:
expecting.insert(0, "<block>")
if current_obj:
push_stack()
current_obj = Block()
current_obj_type = "Block"
else:
read_tight_code(token)
elif '}' is token:
expecting = expecting[1:] # end of fundec
fd_obj = current_obj
pop_stack()
current_obj.add_dec(fd_obj)
else:
throw_error("Syntax error in <fundec>")
def handle_exp(token, end):
global expecting
global exp_grammar_stack
global current_obj
global current_obj_type
if current_obj_type != "Exp":
if current_obj:
push_stack()
current_obj = Exp()
current_obj_type = "Exp"
if len(token) > 1:
for c in ['(',')', '[', ']', ',']:
if c in token:
read_tight_code(token)
return
if ';' in token:
if ';' == token:
if ';' == end:
current_obj.compile()
# add exp to its parent object
exp_obj = current_obj
pop_stack()
current_obj.add_exp(exp_obj)
expecting = expecting[1:] # exp finished
else:
read_tight_code(token)
return
else:
current_obj.raw_append(token) # if we got here, no special chars, just add token
else:
# single character
if token == '(':
exp_grammar_stack.insert(0, token)
current_obj.raw_append(token) # single char is part of exp
elif token == '[':
exp_grammar_stack.insert(0, token)
current_obj.raw_append(token) # single char is part of exp
elif token == ')':
if len(exp_grammar_stack) == 0:
if token == end:
current_obj.compile()
# add exp to its parent object
exp_obj = current_obj
pop_stack()
current_obj.add_exp(exp_obj)
expecting = expecting[1:] # exp finished
else:
throw_error("Syntax error while parsing <exp>", addl="Mismatching parentheses: " + str(current_obj.raw))
else:
if '(' != exp_grammar_stack.pop(0):
throw_error("Syntax error while parsing <exp>", addl="Mismatching parentheses: " + str(current_obj.raw))
else:
current_obj.raw_append(token) # single char is part of exp
elif token == ']':
if len(exp_grammar_stack) == 0:
if token == end:
current_obj.compile()
# add exp to its parent object
exp_obj = current_obj
pop_stack()
current_obj.add_exp(exp_obj)
expecting = expecting[1:] # exp finished
else:
throw_error("Syntax error while parsing <exp>", addl="Mismatching brackets: " + str(current_obj.raw))
else:
if '[' != exp_grammar_stack.pop(0):
throw_error("Syntax error while parsing <exp>", addl="Mismatching brackets: " + str(current_obj.raw))
else:
current_obj.raw_append(token) # single char is part of exp
elif token == ';':
if len(exp_grammar_stack) == 0:
current_obj.compile()
# add exp to its parent object
exp_obj = current_obj
pop_stack()
current_obj.add_exp(exp_obj)
expecting = expecting[1:] # exp finished
else:
throw_error("Syntax error while parsing <exp>", addl="Mismatching parentheses or brackets: " + str(current_obj.raw))
else:
current_obj.raw_append(token) # single char is part of exp
def handle_exp_semi(token):
handle_exp(token, ';')
def handle_exp_paren(token):
handle_exp(token, ')')
def handle_exp_bracket(token):
handle_exp(token, ']')
def handle_block(token):
global expecting
global current_obj
global current_obj_type
if current_obj_type != "Block":
if current_obj:
push_stack()
current_obj = Block()
current_obj_type = "Block"
else:
if current_obj.dec_phase:
if "fun" == token:
expecting.insert(0, "<fundec>")
if current_obj:
push_stack()
current_obj = Fundec()
current_obj_type = "Fundec"
elif is_type(token):
expecting.insert(0, "<vardec>")
if current_obj:
push_stack()
current_obj = Vardec()
current_obj_type = "Vardec"
add_to_ast(token) # return to handler
else:
current_obj.end_decs()
add_to_ast(token) # return to handler
elif '}' in token: # stms expected until }
if '}' is token:
# add block to its parent object
block_obj = current_obj
pop_stack()
current_obj.add_block(block_obj)
expecting = expecting[1:] # block finished
add_to_ast(token) # don't consume token
else:
read_tight_code(token)
else:
expecting.insert(0, "<stm>")
if current_obj:
push_stack()
current_obj = Stm()
current_obj_type = "Stm"
add_to_ast(token)
# redirect to more specific stm handlers based on first token of stm
def handle_stm(token):
global expecting
global current_obj
global current_obj_type
global stms
if current_obj is None: # assume last stm of program
current_obj = Stm()
current_obj_type = "Stm"
current_obj.independent = True
stms.append(current_obj)
elif current_obj_type != "Stm":
push_stack()
current_obj = Stm()
current_obj_type = "Stm"
assert_obj_type("Stm")
handled = False
for key in STM_REDIRECTS.keys():
if key in token:
handled = True
if key == token:
expecting[0] = STM_REDIRECTS[key]
if key == "{" and current_obj.independent:
current_obj.set_style("block")
expecting.insert(1, "<stm-finish>")
consume_tokens = ["return", "if", "while", "for", "halt"]
if key not in consume_tokens:
add_to_ast(token) # return to handler
else:
read_tight_code(token)
if not handled:
expecting[0] = "<stm-exp>"
add_to_ast(token)
def handle_stm_finish(token):
global expecting
if token == '}':
expecting = expecting[1:]
else:
throw_error("Expected end of block")
def handle_stm_empty(token):
global expecting
global current_obj
global current_obj_type
current_obj.style = "empty"
# add stm to its parent object
if not current_obj.independent:
stm_obj = current_obj
pop_stack()
current_obj.add_stm(stm_obj)
expecting = expecting[1:]
def handle_stm_finally(token):
# fixme error if this is the last part of the program?
# add stm to its parent object
global expecting
expecting = expecting[1:]
if not current_obj.independent:
stm_obj = current_obj
pop_stack()
current_obj.add_stm(stm_obj)
add_to_ast(token)
def handle_stm_if(token):
global expecting
global current_obj
global current_obj_type
# if has been consumed
if '(' in token and expecting[0] == "<stm-if>":
if '(' == token:
current_obj.set_style("if")
expecting[0] = "<stm-then>"
expecting.insert(0, "<exp-paren>")
else:
read_tight_code(token)
elif expecting[0] == "<stm-then>":
expecting[0] = "<stm-else>"
expecting.insert(0, "<stm>")
add_to_ast(token)
elif expecting[0] == "<stm-else>" and token == "else":
expecting[0] = "<stm-finally>"
expecting.insert(0, "<stm>")
else:
throw_error("Syntax error parsing <stm-if>")
def handle_stm_while(token):
global expecting
global current_obj
global current_obj_type
# while has been consumed
if '(' in token and expecting[0] == "<stm-while>":
if '(' == token:
current_obj.set_style("while")
expecting[0] = "<exp-paren>"
expecting.insert(0, "<stm-finally>")
else:
read_tight_code(token)
else:
throw_error("Syntax error parsing <stm-while>")
def handle_stm_for(token):
global expecting
global current_obj
global current_obj_type
throw_error("Parser not defined for syntax <stm-for>")
# for has been consumed
if '(' in token and expecting[0] == "<stm-for>":
if '(' == token:
current_obj.set_style("for")
expecting[0] = "<stm-finally>"
expecting.insert(0, "<exp-paren>")
expecting.insert(0, "<exp-semi>")
expecting.insert(0, "<vardec>")
else:
read_tight_code(token)
else:
throw_error("Syntax error parsing <stm-for>")
def handle_stm_exp(token):
global expecting
global current_obj
global current_obj_type
if expecting[0] == "<stm-exp>":
current_obj.set_style("exp")
expecting[0] = "<stm-exp-rest>"
expecting.insert(0, "<exp-semi>")
add_to_ast(token)
elif expecting[0] == "<stm-exp-rest>":
expecting = expecting[1:]
# add stm to its parent object
if not current_obj.independent:
stm_obj = current_obj
pop_stack()
current_obj.add_stm(stm_obj)
add_to_ast(token)
else:
throw_error("Syntax error while parsing <stm> :: <exp> ;")
def handle_stm_return(token):
global expecting
global current_obj
global current_obj_type
if ';' == token:
current_obj.set_style("return0")
expecting = expecting[1:] # consume ;
else:
current_obj.set_style("return")
expecting[0] = "<exp-semi>"
add_to_ast(token)
# fixme not always stm?
# add stm to its parent object
if current_obj_type == "Stm" and not current_obj.independent:
stm_obj = current_obj
pop_stack()
current_obj.add_stm(stm_obj)
def handle_stm_halt(token):
# halt has been consumed
if '(' in token:
if '(' == token:
if expecting[0] == "<stm-halt>":
current_obj.set_style("halt")
expecting[0] = "<stm-halt-rest>"
expecting.insert(0, "<exp-paren>")
else:
throw_error("Syntax error parsing <stm-halt>")
else:
read_tight_code(token)
elif expecting[0] == "<stm-halt-rest>" and token == ';':
expecting = expecting[1:]
# add stm to its parent object
if not current_obj.independent:
stm_obj = current_obj
pop_stack()
current_obj.add_stm(stm_obj)
add_to_ast(token)
else:
throw_error("Syntax error parsing <stm-halt>")
############################
### VALIDITY CHECKERS ######
############################
def is_rtype(token):
return token == "void" or is_type(token)
def is_valid_char(c, mustbe=[], cantbe=[]):
restrictions = copy.copy(cantbe)
if mustbe != []:
options = ["digit", "lower", "upper", "_", "print"]
for opt in options:
if opt not in mustbe:
restrictions.append(opt)
if c.isdigit() and "digit" not in restrictions:
return True
if c.isalpha():
if c.islower() and "lower" not in restrictions:
return True
elif c.isupper() and "upper" not in restrictions:
return True
if c == "_" and "_" not in restrictions:
return True
if c in PRINTABLES and "print" not in restrictions:
return True
return False
def is_id(token, proto=False, permissive=False):
valid = True
tok = token
if '<' in token:
if '>' == token[-1]:
for t in token[token.find('<')+1:-1].split(','):
valid = valid and (is_type(t) or proto or is_id(t,permissive=True))
tok = token[:token.find('<')]
else:
return False
if not permissive:
valid = valid and is_valid_char(tok[0], mustbe=["lower"])
else:
valid = valid and is_valid_char(tok[0], cantbe=["digit", "print", "_"])
if len(tok) > 1:
for c in tok[1:]:
valid = valid and is_valid_char(c, cantbe=["print"]) # subsequent
valid = valid and not is_reserved(tok)
return valid
def is_reserved(token):
return token in RESERVED
def is_tvar(token):
valid = is_valid_char(token[0], mustbe=["upper"])
if len(token) > 1:
for c in token[1:]:
valid = valid and is_valid_char(c, cantbe=["print"]) # subsequent
return valid
def is_intliteral(token):
valid = is_valid_char(token[0], mustbe=["digit"])
if len(token) > 1:
for c in token[1:]:
valid = valid and is_valid_char(c, mustbe=["digit"])
return valid
def is_floatliteral(token):
if token.find('.') == -1 or len(token) < 2: # can't be just '.'
return False
else:
before_dot = token[:token.find('.')]
after_dot = token[(token.find('.')+1):]
before_valid = len(before_dot) == 0 or is_intliteral(before_dot)
if after_dot.find('e') == -1:
after_valid = len(after_dot) == 0 or is_intliteral(after_dot)
else:
before_e = after_dot[:after_dot.find('e')]
after_e = after_dot[after_dot.find('e')+1:]
before_e_valid = len(before_e) == 0 or is_intliteral(before_e)
e_valid = (after_e[0] == "+" and len(after_e) > 1) \
or (after_e[0] == "-" and len(after_e) > 1) or is_intliteral(after_e[0])
if len(after_e) > 1:
e_valid = e_valid and is_intliteral(after_e[1:])
after_valid = before_e_valid and e_valid
return before_valid and after_valid
def is_stringliteral(token):
str = token[1:-1]
if not (token.startswith("\"") and token.endswith("\"")):
return False
valid = True
for c in str:
valid = valid and is_valid_char(c)
return valid
def is_charliteral(token):
return len(token) == 3 and token[0] == "\'" and token[2] == "\'" \
and is_valid_char(token[1])
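# Editor's note: illustrative examples inferred from the checks above (an addition, not original code):
#   is_charliteral("'a'")    -> True  (single printable character in single quotes)
#   is_stringliteral('"hi"') -> True  (double-quoted run of valid characters)
#   is_intliteral("42")      -> True, is_floatliteral("1.5") -> True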
def is_literal(token):
if token == "null":
return True
elif token == "true":
return True
elif token == "false":
return True
else:
return is_charliteral(token) or is_stringliteral(token) \
or is_intliteral(token) or is_floatliteral(token)
def add_typeid(token):
global typeids
typeids.append(token)
if DEBUG_LEVEL > 1.5:
print("DEBUG: Added typeid " + token)
def is_typeid(token):
return token in typeids
def is_typeapp(token):
# fixme doesn't handle <typeid> <<types>>
return is_typeid(token) or is_tvar(token)
# Check whether a token is a valid type
def is_type(token, permissive=False): # fixme
valid = False
angle_valid = True
t2_valid = True
type_token = token
if is_typeid(token):
return True
# Generally handle "function" types
if token.startswith('(') and token.endswith(')'):
type_token = token[1:-1]
for a in type_token.split(','):
t2_valid = t2_valid and is_type(a)
if token.startswith("function("):
arrow_index = token.find(ARROW)
type_token = token[9:arrow_index]
type2 = token[arrow_index+2:-1]
t2_valid = t2_valid and is_type(type2)
# Generally handle <T> types
elif '<' in token:
if '>' == token[-1]:
angle_type = token[token.find('<')+1:-1]
type_token = token[:token.find('<')]
for a in angle_type.split(','):
angle_valid = angle_valid and is_type(a, permissive=True)
else:
angle_valid = False
# Remove extra ()
if type_token.startswith('(') and type_token.endswith(')'):
type_token = type_token[1:-1]
for a in type_token.split(','):
t2_valid = t2_valid and is_type(a)
# Check basic type
valid = valid or type_token in PRIMTYPES
valid = valid or is_typeapp(type_token)
if permissive:
valid = valid or is_id(type_token, permissive=True)
# array, can be <type> []
# can be <type><T>
# can be function ( ( <types> ) ARROW <rtype> )
return valid and angle_valid and t2_valid
TOKEN_TO_HANDLER = {
"<protodecs>" : handle_protodecs,
"<protodec>" : handle_protodec,
"<classdecs>" : handle_classdecs,
"<classdec>" : handle_classdec,
"<funprotos>" : handle_funprotos,
"<funproto>" : handle_funproto,
"<formals>" : handle_formals,
"<formals-rest>" : handle_formals,
"<formal>" : handle_formal,
"<rtype>" : handle_rtype,
"<implements>" : handle_implements,
"<implements-rest>" : handle_implements,
"<classbody>" : handle_classbody,
"<bodydecs>" : handle_bodydecs,
"<bodydecs-plus-bracket>" : handle_bodydecs_plus_bracket,
"<constdec>" : handle_constdec,
"<globaldec>" : handle_globaldec,
"<fielddec>" : handle_fielddec,
"<fundec>" : handle_fundec,
"<vardec>" : handle_vardec,
"<exp-semi>" : handle_exp_semi,
"<exp-paren>" : handle_exp_paren,
"<exp-bracket>" : handle_exp_bracket,
"<stm>" : handle_stm,
"<stm-empty>" : handle_stm_empty,
"<stm-if>" : handle_stm_if,
"<stm-then>" : handle_stm_if,
"<stm-else>" : handle_stm_if,
"<stm-while>" : handle_stm_while,
"<stm-for>" : handle_stm_for,
"<stm-finally>" : handle_stm_finally,
"<stm-ret>" : handle_stm_return,
"<stm-halt>" : handle_stm_halt,
"<stm-halt-rest>" : handle_stm_halt,
"<stm-exp>" : handle_stm_exp,
"<stm-exp-rest>" : handle_stm_exp,
"<stm-finish>" : handle_stm_finish,
"<block>" : handle_block,
}
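# Editor's sketch (assumption): the dispatch loop itself is defined earlier in the repo and
# is not shown in this excerpt, but the handlers above suggest add_to_ast() looks up the
# current expectation and delegates to the matching handler, roughly:
#
#   def add_to_ast(token):
#       handler = TOKEN_TO_HANDLER[expecting[0]]
#       handler(token)
#
# The real add_to_ast may do additional bookkeeping beyond this lookup.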
def main():
import argparse
global DEBUG_LEVEL
global INDENTATION
prog_desc = "Quirk 24 parser by <NAME>"
parser = argparse.ArgumentParser(description=prog_desc)
parser.add_argument('input', help="Input file name")
parser.add_argument('output', help="Output file name")
parser.add_argument('--indentoff', action='store_true', help="Set output to be an unindented AST (default indented)")
parser.add_argument('-debug', default=0, help="Level of debug info, from 0-3")
args = parser.parse_args()
DEBUG_LEVEL = float(args.debug)
if args.indentoff:
INDENTATION = ""
run(args.input, args.output)
if '__main__' == __name__:
main()
```
#### File: joshmiller17/pl_parser/unindent.py
```python
from __future__ import print_function
def run(input, output, all):
import os
out_file = open(output, 'w')
with open(input, 'r') as in_file:
line = in_file.readline()
while (line):
if all:
print("".join(line.split()), file=out_file, end='\n')
else:
print(line.lstrip(), file=out_file, end='')
line = in_file.readline()
out_file.close()
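# Example invocations (based on the argparse flags defined in main() below):
#   python unindent.py input.txt output.txt          # strip leading whitespace only
#   python unindent.py input.txt output.txt --all    # strip all whitespace from each line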
def main():
import argparse
prog_desc = "Whitespace stripper by <NAME>"
parser = argparse.ArgumentParser(description=prog_desc)
parser.add_argument('input', help="Input file name")
parser.add_argument('output', help="Output file name")
parser.add_argument('--all', action='store_true', help="Remove all whitespace")
args = parser.parse_args()
run(args.input, args.output, args.all)
if '__main__' == __name__:
main()
``` |
{
"source": "joshmiller17/spirecomm",
"score": 3
} |
#### File: spirecomm/communication/coordinator.py
```python
import sys
import queue
import threading
import json
import collections
import time
from spirecomm.spire.game import Game
from spirecomm.spire.screen import ScreenType
from spirecomm.communication.action import Action, StartGameAction
def read_stdin(input_queue):
"""Read lines from stdin and write them to a queue
:param input_queue: A queue, to which lines from stdin will be written
:type input_queue: queue.Queue
:return: None
"""
while True:
stdin_input = ""
#print("Communicator: read_stdin", file=self.logfile, flush=True)
while True:
input_char = sys.stdin.read(1)
if input_char == '\n':
break
else:
stdin_input += input_char
input_queue.put(stdin_input)
def write_stdout(output_queue):
"""Read lines from a queue and write them to stdout
:param output_queue: A queue, from which this function will receive lines of text
:type output_queue: queue.Queue
:return: None
"""
while True:
output = output_queue.get()
print(output, end='\n', flush=True)
class Coordinator:
"""An object to coordinate communication with Slay the Spire"""
def __init__(self):
self.input_queue = queue.Queue()
self.output_queue = queue.Queue()
self.actions_played_queue = queue.Queue()
self.input_thread = threading.Thread(target=read_stdin, args=(self.input_queue,))
self.output_thread = threading.Thread(target=write_stdout, args=(self.output_queue,))
self.input_thread.daemon = True
self.input_thread.start()
self.output_thread.daemon = True
self.output_thread.start()
self.action_queue = collections.deque()
self.state_change_callback = None
self.out_of_game_callback = None
self.error_callback = None
self.game_is_ready = False
self.stop_after_run = False
self.in_game = False
self.last_game_state = None
self.last_error = None
self.last_msg = ""
self.last_action = None
self.logfile = open("ai_comm.log","w")
print("Communicator: Init ", file=self.logfile, flush=True)
def signal_ready(self):
"""Indicate to Communication Mod that setup is complete
Must be used once, before any other commands can be sent.
:return: None
"""
print("Communicator: signal_ready", file=self.logfile, flush=True)
self.send_message("ready")
def send_message(self, message):
"""Send a command to Communication Mod and start waiting for a response
:param message: the message to send
:type message: str
:return: None
"""
self.output_queue.put(message)
self.game_is_ready = False
def add_action_to_queue(self, action):
"""Queue an action to perform when ready
:param action: the action to queue
:type action: Action
:return: None
"""
self.action_queue.append(action)
def clear_actions(self):
"""Remove all actions from the action queue
:return: None
"""
self.action_queue.clear()
def execute_next_action(self):
"""Immediately execute the next action in the action queue
:return: None
"""
action = self.action_queue.popleft()
self.last_action = action
self.actions_played_queue.put(action)
action.execute(self)
def re_execute_last_action(self):
self.actions_played_queue.put(self.last_action)
self.last_action.execute(self)
def execute_next_action_if_ready(self):
"""Immediately execute the next action in the action queue, if ready to do so
:return: None
"""
if len(self.action_queue) > 0 and self.action_queue[0].can_be_executed(self):
self.execute_next_action()
def register_state_change_callback(self, new_callback):
"""Register a function to be called when a message is received from Communication Mod
:param new_callback: the function to call
:type new_callback: function(game_state: Game) -> Action
:return: None
"""
print("Communicator: register_state_change_callback", file=self.logfile, flush=True)
self.state_change_callback = new_callback
def register_command_error_callback(self, new_callback):
"""Register a function to be called when an error is received from Communication Mod
:param new_callback: the function to call
:type new_callback: function(error: str) -> Action
:return: None
"""
print("Communicator: register_command_error_callback", file=self.logfile, flush=True)
self.error_callback = new_callback
def register_out_of_game_callback(self, new_callback):
"""Register a function to be called when Communication Mod indicates we are in the main menu
:param new_callback: the function to call
:type new_callback: function() -> Action
:return: None
"""
print("Communicator: register_out_of_game_callback", file=self.logfile, flush=True)
self.out_of_game_callback = new_callback
def view_last_msg(self):
return self.last_msg
def get_action_played(self):
if not self.actions_played_queue.empty():
return self.actions_played_queue.get()
def get_next_raw_message(self, block=False):
"""Get the next message from Communication Mod as a string
:param block: set to True to wait for the next message
:type block: bool
:return: the message from Communication Mod
:rtype: str
"""
if block or not self.input_queue.empty():
self.last_msg = self.input_queue.get()
return self.last_msg
def receive_game_state_update(self, block=False, perform_callbacks=True, repeat=False):
"""Using the next message from Communication Mod, update the stored game state
:param block: set to True to wait for the next message
:type block: bool
:param perform_callbacks: set to True to perform callbacks based on the new game state
:type perform_callbacks: bool
:return: whether a message was received
"""
message = ""
if repeat:
message = self.last_msg
else:
message = self.get_next_raw_message(block)
if message is not None:
communication_state = json.loads(message)
self.last_error = communication_state.get("error", None)
self.game_is_ready = communication_state.get("ready_for_command")
if self.last_error is None:
self.in_game = communication_state.get("in_game")
if self.in_game:
self.last_game_state = Game.from_json(communication_state.get("game_state"), communication_state.get("available_commands"))
else:
print("Communicator detected error", file=self.logfile, flush=True)
if perform_callbacks:
if self.last_error is not None:
self.action_queue.clear()
new_action = self.error_callback(self.last_error)
self.add_action_to_queue(new_action)
elif self.in_game:
if len(self.action_queue) == 0 and perform_callbacks:
#print(str(self.last_game_state), file=self.logfile, flush=True)
new_action = self.state_change_callback(self.last_game_state)
self.add_action_to_queue(new_action)
elif self.stop_after_run:
self.clear_actions()
else:
new_action = self.out_of_game_callback()
self.add_action_to_queue(new_action)
return True
return False
def unpause_agent(self):
print("Communicator: game update " + str(time.time()), file=self.logfile, flush=True)
print("Communicator's game state:", file=self.logfile, flush=True)
print(str(self.last_game_state), file=self.logfile, flush=True)
self.receive_game_state_update(repeat=True)
def run(self):
"""Start executing actions forever
:return: None
"""
print("Communicator: run", file=self.logfile, flush=True)
while True:
self.execute_next_action_if_ready()
self.receive_game_state_update(perform_callbacks=True)
def play_one_game(self, player_class, ascension_level=0, seed=None):
"""
:param player_class: the class to play
:type player_class: PlayerClass
:param ascension_level: the ascension level to use
:type ascension_level: int
:param seed: the alphanumeric seed to use
:type seed: str
:return: True if the game was a victory, else False
:rtype: bool
"""
print("Communicator: play_one_game", file=self.logfile, flush=True)
self.clear_actions()
while not self.game_is_ready:
self.receive_game_state_update(block=True, perform_callbacks=False)
if not self.in_game:
StartGameAction(player_class, ascension_level, seed).execute(self)
self.receive_game_state_update(block=True)
while self.in_game:
self.execute_next_action_if_ready()
self.receive_game_state_update()
if self.last_game_state.screen_type == ScreenType.GAME_OVER:
return self.last_game_state.screen.victory
else:
return False
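# Editor's note: an illustrative usage sketch, not part of the original module. The callback
# names mirror those registered by launch_gui() in utilities/simple_gui.py; "my_agent" is a
# placeholder for an agent object such as SimpleAgent.
#
#   from spirecomm.spire.character import PlayerClass
#   coordinator = Coordinator()
#   coordinator.signal_ready()
#   coordinator.register_command_error_callback(my_agent.handle_error)
#   coordinator.register_state_change_callback(my_agent.get_next_action_in_game)
#   coordinator.register_out_of_game_callback(my_agent.get_next_action_out_of_game)
#   coordinator.play_one_game(PlayerClass.IRONCLAD)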
```
#### File: spirecomm/spire/character.py
```python
from enum import Enum
import json
import os
from spirecomm.spire.power import Power
class Intent(Enum):
ATTACK = 1
ATTACK_BUFF = 2
ATTACK_DEBUFF = 3
ATTACK_DEFEND = 4
BUFF = 5
DEBUFF = 6
STRONG_DEBUFF = 7
DEBUG = 8
DEFEND = 9
DEFEND_DEBUFF = 10
DEFEND_BUFF = 11
ESCAPE = 12
MAGIC = 13
NONE = 14
SLEEP = 15
STUN = 16
UNKNOWN = 17
def is_attack(self):
return self in [Intent.ATTACK, Intent.ATTACK_BUFF, Intent.ATTACK_DEBUFF, Intent.ATTACK_DEFEND]
class PlayerClass(Enum):
IRONCLAD = 1
THE_SILENT = 2
DEFECT = 3
class Orb:
def __init__(self, name, orb_id, evoke_amount, passive_amount):
self.name = name
self.orb_id = orb_id
self.evoke_amount = evoke_amount
self.passive_amount = passive_amount
@classmethod
def from_json(cls, json_object):
name = json_object.get("name")
orb_id = json_object.get("id")
evoke_amount = json_object.get("evoke_amount")
passive_amount = json_object.get("passive_amount")
orb = Orb(name, orb_id, evoke_amount, passive_amount)
return orb
class Character:
def __init__(self, max_hp, current_hp=None, block=0):
self.max_hp = max_hp
self.current_hp = current_hp
if self.current_hp is None:
self.current_hp = self.max_hp
self.block = block
self.powers = []
class Player(Character):
def __init__(self, max_hp, current_hp=None, block=0, energy=0):
super().__init__(max_hp, current_hp, block)
self.energy = energy
self.orbs = []
@classmethod
def from_json(cls, json_object):
player = cls(json_object["max_hp"], json_object["current_hp"], json_object["block"], json_object["energy"])
player.powers = [Power.from_json(json_power) for json_power in json_object["powers"]]
player.orbs = [Orb.from_json(orb) for orb in json_object["orbs"]]
return player
class Monster(Character):
def __init__(self, name, monster_id, max_hp, current_hp, block, intent, half_dead, is_gone, move_id=-1, move_base_damage=0, move_adjusted_damage=0, move_hits=0):
super().__init__(max_hp, current_hp, block)
self.name = name
self.monster_id = monster_id
self.intent = intent
self.half_dead = half_dead
self.is_gone = is_gone # dead or out of combat
self.move_id = move_id
self.move_base_damage = move_base_damage
self.move_adjusted_damage = move_adjusted_damage
self.move_hits = move_hits
self.monster_index = 0
# Load from monsters/[name].json
'''
Move format
name : effects (list)
Effect format
(name, value)
e.g. Damage, 10; Vulnerable, 2
'''
self.moves = {}
'''
States format
state : { transition: [(new state, probability), ...], moveset: [(move, probability), ...]}
Always starts in state 1
states dict lists probability to transition to other states
TODO some enemies transition on trigger condition, like half health
'''
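# Editor's illustrative example (an assumption about the on-disk layout implied by the
# comments above; actual monsters/<name>.json files may differ):
#
#   {
#     "moves":  { "Chomp": [["Damage", 11]], "Bellow": [["Strength", 3]] },
#     "states": { "1": { "transition": [["1", 1.0]],
#                        "moveset":    [["Chomp", 0.75], ["Bellow", 0.25]] } }
#   }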
self.states = {}
try:
with open(os.path.join("monsters", self.name + ".json"),"r") as f:
jsonDict = json.load(f)
self.states = jsonDict["states"]
self.moves = jsonDict["moves"]
except Exception as e:
with open('err.log', 'a+') as err_file:
err_file.write("Monster Error: " + str(self.name) + "\n")
err_file.write(str(e) + "\n")
#raise Exception(e)
@classmethod
def from_json(cls, json_object):
name = json_object["name"]
monster_id = json_object["id"]
max_hp = json_object["max_hp"]
current_hp = json_object["current_hp"]
block = json_object["block"]
intent = Intent[json_object["intent"]]
half_dead = json_object["half_dead"]
is_gone = json_object["is_gone"]
move_id = json_object.get("move_id", -1)
move_base_damage = json_object.get("move_base_damage", 0)
move_adjusted_damage = json_object.get("move_adjusted_damage", 0)
move_hits = json_object.get("move_hits", 0)
monster = cls(name, monster_id, max_hp, current_hp, block, intent, half_dead, is_gone, move_id, move_base_damage, move_adjusted_damage, move_hits)
monster.powers = [Power.from_json(json_power) for json_power in json_object["powers"]]
return monster
def __eq__(self, other):
if self.name == other.name and self.current_hp == other.current_hp and self.max_hp == other.max_hp and self.block == other.block:
if len(self.powers) == len(other.powers):
for i in range(len(self.powers)):
if self.powers[i] != other.powers[i]:
return False
return True
return False
```
#### File: spirecomm/utilities/simple_gui.py
```python
import os
import collections
import itertools
import datetime
import sys
import time
import traceback
import threading
#import spirecomm
#print(spirecomm.__file__)
import spirecomm.spire.card
import spirecomm.communication.coordinator as coord
from spirecomm.ai.agent import SimpleAgent
from spirecomm.spire.character import PlayerClass
os.environ["KIVY_NO_CONSOLELOG"] = "1"
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.textinput import TextInput
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.scrollview import ScrollView
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.clock import Clock
from kivy.core.window import Window
class Base(BoxLayout):
def __init__(self, coordinator, agent, f):
super().__init__(orientation='vertical')
self.coordinator = coordinator
self.agent = agent
self.log = f
self.last_comm = ""
self.step = False # whether to Run or Step when pressing Resume
self.sleeping = False
self.z_count = 0
print("Base: Init", file=self.log, flush=True)
self.input_text = TextInput(size_hint=(2, 7))
self.input_text.text = ""
self.input_text.foreground_color = (1, 1, 1, 1)
self.input_text.background_color = (.1, .1, .1, 1)
self.input_text.readonly = True
self.max_in_history_lines = 15
self.in_history = collections.deque(maxlen=self.max_in_history_lines)
self.add_widget(self.input_text)
self.out_history_text = TextInput(size_hint=(1, 2))
self.out_history_text.readonly = True
self.out_history_text.foreground_color = (1, 1, 1, 1)
self.out_history_text.background_color = (.1, .1, .1, 1)
self.add_widget(self.out_history_text)
self.output_text = TextInput(size_hint=(1, 1))
self.output_text.foreground_color = (1, 1, 1, 1)
self.output_text.background_color = (.1, .1, .1, 1)
self.add_widget(self.output_text)
self.button = Button(text='Send', size_hint=(1, 1))
self.button.bind(on_press=self.send_output)
self.add_widget(self.button)
self.pause = Button(text='Pause', size_hint=(1, 1))
self.pause.bind(on_press=self.do_pause)
self.add_widget(self.pause)
self.resume = Button(text='Resume', size_hint=(1, 1))
self.resume.bind(on_press=self.do_resume)
self.add_widget(self.resume)
self.max_out_history_lines = 5
self.out_history_lines = collections.deque(maxlen=self.max_out_history_lines)
Window.bind(on_key_up=self.key_callback)
def do_communication(self, dt):
new_msg = str(self.agent.get_next_msg())
if new_msg != "":
if new_msg == 'z':
self.sleeping = True
self.z_count += 1
else:
self.sleeping = False
self.z_count = 0
msgs = new_msg.split('\n')
for m in msgs:
self.in_history.append(m)
self.input_text.text = "\n".join(self.in_history)
if self.sleeping:
self.input_text.text += "\n" + 'z' * (1 + self.z_count % 3)
action_msg = self.coordinator.get_action_played()
def do_pause(self, instance=None):
self.agent.pause()
def do_resume(self, instance=None):
if not self.step:
self.agent.resume()
else:
self.agent.take_step()
def send_output(self, instance=None, text=None):
if text is None:
text = self.output_text.text
text = text.strip()
if not self.handle_debug_cmds(text):
print(text, end='\n', flush=True)
self.out_history_lines.append(text)
self.out_history_text.text = "\n".join(self.out_history_lines)
self.output_text.text = ""
def key_callback(self, window, keycode, *args):
if keycode == 13:
self.send_output()
# Returns True if message was a debug command to execute,
# False if we should print out for CommMod
def handle_debug_cmds(self, msg):
# testbed
if msg == "test":
spirecomm.spire.card.Card("0", "Strike", "Attack", "Common")
return True
if msg == "threadcheck":
for thread in threading.enumerate():
print(thread, file=self.log, flush=True)
print(thread.is_alive(), file=self.log, flush=True)
self.in_history.append(str(thread) + str(thread.is_alive()))
return True
if msg == "step":
self.step = not self.step
self.in_history.append("Step mode: " + ("ON" if self.step else "OFF"))
return True
if msg == "write":
self.agent.tree_to_json("tree.json")
self.in_history.append("Behaviour tree saved to tree.json")
return True
elif msg.startswith("write "):
filename = msg[6:]
self.agent.tree_to_json(filename + ".json")
self.in_history.append("Behaviour tree saved to " + filename + ".json")
return True
if msg == "tree":
self.agent.print_tree()
return True
if msg == "load":
msg = "load tree"
if msg.startswith("load "):
filename = msg[5:]
try:
self.in_history.append("Loading " + filename + ".json")
self.agent.json_to_tree(filename + ".json")
except Exception as e:
print(e, file=self.log, flush=True)
return True
if msg.startswith("delay "):
try:
self.agent.action_delay = float(msg[6:])
self.in_history.append("DELAY SET TO " + str(self.agent.action_delay))
return True
except Exception as e:
print(e, file=self.log, flush=True)
if msg.startswith("debug "):
try:
self.agent.debug_level = int(msg[6:])
self.in_history.append("DEBUG SET TO " + str(self.agent.debug_level))
return True
except Exception as e:
print(e, file=self.log, flush=True)
if msg == "clear":
self.input_text.text = ""
return True
return False
class CommunicationApp(App):
def __init__(self, coordinator, agent, f):
super().__init__()
self.coordinator = coordinator
self.agent = agent
self.log = f
print("Kivy: Init", file=self.log, flush=True)
def build(self):
base = Base(self.coordinator, self.agent, self.log)
Clock.schedule_interval(base.do_communication, 1.0 / 120.0)
return base
def run_agent(f, communication_coordinator):
# TEST
print("Agent: preparing profiler test", file=f, flush=True)
try:
# import io, cProfile, pstats
# pr = cProfile.Profile()
# pr.enable()
# s = io.StringIO()
# print("Agent: init profiler test", file=f, flush=True)
result = communication_coordinator.play_one_game(PlayerClass.IRONCLAD)
print("Agent: first game ended in " + ("victory" if result else "defeat"), file=f, flush=True)
# print("Agent: finishing profiler test", file=f, flush=True)
# pr.disable()
# sortby = 'cumulative'
# ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
# ps.print_stats()
# print(s.getvalue(), file=f, flush=True)
except Exception as e:
print("Agent thread encountered error:", file=f, flush=True)
print(e, file=f, flush=True)
# return # END TEST
#Play games forever, cycling through the various classes
for chosen_class in itertools.cycle(PlayerClass):
#agent.change_class(chosen_class)
print("Agent: new game", file=f, flush=True)
result = communication_coordinator.play_one_game(PlayerClass.IRONCLAD)
print("Agent: game ended in " + ("victory" if result else "defeat"), file=f, flush=True)
#result = coordinator.play_one_game(chosen_class)
def launch_gui():
f=open("ai.log","w")
print("GUI: Init " + str(time.time()), file=f, flush=True)
agent = SimpleAgent(f)
print("GUI: Register agent", file=f, flush=True)
communication_coordinator = coord.Coordinator()
print("GUI: Register coordinator", file=f, flush=True)
communication_coordinator.signal_ready()
print("GUI: Ready", file=f, flush=True)
communication_coordinator.register_command_error_callback(agent.handle_error)
communication_coordinator.register_state_change_callback(agent.get_next_action_in_game)
communication_coordinator.register_out_of_game_callback(agent.get_next_action_out_of_game)
print("GUI: Registered coordinator actions", file=f, flush=True)
agent_thread = threading.Thread(target=run_agent, args=(f,communication_coordinator))
print("Agent thread is " + str(agent_thread), file=f, flush=True)
agent_thread.daemon = True
print("Agent: Init", file=f, flush=True)
agent_thread.start()
print("Agent: Starting", file=f, flush=True)
print("GUI: Starting mainloop", file=f, flush=True)
CommunicationApp(communication_coordinator, agent, f).run()
if __name__ == "__main__":
lf = open("err.log", "w")
try:
launch_gui()
except Exception as e:
print(traceback.format_exc(), file=lf, flush=True)
``` |
{
"source": "joshmiller17/venntbot",
"score": 3
} |
#### File: joshmiller17/venntbot/communication.py
```python
import discord
from discord.ext import commands
import random, d20, operator, time, asyncio
from enum import Enum
import importlib
db = importlib.import_module("db")
stats = importlib.import_module("stats")
meta = importlib.import_module("meta")
logClass = importlib.import_module("logger")
logger = logClass.Logger("communication")
COMM_STATE = None
ABILITY_LIST_CACHE = None
SECONDS_PER_MSG_BATCH = 1
COMM_BOT = None
CTX_TO_MSG = {} # ctx : message obj
MSGS_SENT = False
async def send(ctx, message):
if not COMM_BOT:
logger.err("send", "COMM_BOT not initialized")
if ctx not in COMM_BOT.message_queue:
COMM_BOT.message_queue[ctx] = []
COMM_BOT.message_queue[ctx].append(message)
# call this send instead when you need the Message obj back
async def send_and_return(ctx, message):
await send(ctx, message)
return await asyncio.create_task(get_message(ctx))
async def get_message(ctx):
global CTX_TO_MSG
wait_time = 0
while True:
if MSGS_SENT and ctx in CTX_TO_MSG:
ret = CTX_TO_MSG[ctx]
del CTX_TO_MSG[ctx]
return ret
await asyncio.sleep(0.5)
wait_time += 0.5
if wait_time > SECONDS_PER_MSG_BATCH * 3:
logger.err("get_message", "no message found")
return None
# split contents into messages < 2000 characters (Discord limit)
async def send_in_batches(ctx, msg_list):
global CTX_TO_MSG
if msg_list == []:
return
#logger.log("send_in_batches",str(COMM_BOT.message_queue))
msg_length = 0
msg = ""
for line in msg_list:
line_len = len(line)
if msg_length + line_len > 1999:
message_obj = await ctx.send(msg)
msg_length = 0
msg = ""
if msg != "":
msg += "\n"
msg += line
msg_length += line_len + 2 # newline
if msg_length > 0: # finally, send whatever is left
message_obj = await ctx.send(msg)
CTX_TO_MSG[ctx] = message_obj
async def make_choice_list(self, ctx, choices, offset):
choice_map = {}
count = 0
has_more = False
for c in choices:
if offset > 0:
offset -= 1
continue
if count > 8:
choice_map["More..."] = db.MORE
has_more = True
break
choice_map[c] = db.NUMBERS[count]
count += 1
ret = []
for c, emoji in choice_map.items():
ret.append("{0} {1}".format(emoji, c))
m = await send_and_return(ctx,"```\n{0}\n```".format("\n".join(ret)))
for i in range(count):
await m.add_reaction(db.NUMBERS[i])
if has_more:
await m.add_reaction(db.MORE)
db.QUICK_ACTION_MESSAGE = m
class Communication(commands.Cog):
"""Interface with the bot."""
def __init__(self, bot):
global COMM_BOT
self.bot = bot
COMM_BOT = self
self.enemy_list_offset = 0
self.ability_list_offset = 0
self.chosen_ability = None # stored for convenience
self.initCog = self.bot.get_cog('Initiative')
self.gm = self.bot.get_cog('GM')
self.message_queue = {} # ctx : msgs
self.scheduler = None
@commands.Cog.listener()
async def on_message(self, message):
if not self.scheduler:
self.scheduler = asyncio.create_task(self.schedule_messages(SECONDS_PER_MSG_BATCH))
async def schedule_messages(self, timeout):
global CTX_TO_MSG, MSGS_SENT
while True:
await asyncio.sleep(timeout)
MSGS_SENT = False
CTX_TO_MSG = {}
for ctx, msgs in self.message_queue.items():
await send_in_batches(ctx, msgs)
MSGS_SENT = True
self.message_queue = {}
async def remove_bot_reactions(self, message):
for reaction in message.reactions:
if reaction.me:
await reaction.remove(self.bot.user)
await self.remove_bot_reactions(message) # refresh list and try again
@commands.command(pass_context=True)
async def quick(self, ctx):
"""Show available quick actions."""
await suggest_quick_actions(ctx, db.find(self.initCog.whose_turn))
@commands.Cog.listener()
async def on_reaction_add(self, reaction, user):
global COMM_STATE, ABILITY_LIST_CACHE
if user == self.bot.user:
return
if reaction.message == db.QUICK_ACTION_MESSAGE:
if self.initCog.whose_turn is None:
await db.QUICK_CTX.send("Cannot process reactions, I don't know whose turn it is.")
return
if meta.get_character_name(user) == self.initCog.whose_turn or meta.get_character_name(user) == "GM":
await self.remove_bot_reactions(reaction.message)
who = self.initCog.whose_turn
who_ent = db.find(who)
if reaction.emoji == db.MORE:
if COMM_STATE == CommState.ATTACK:
await make_enemy_list(self, db.QUICK_CTX, self.enemy_list_offset)
elif COMM_STATE == CommState.ABILITIES:
await make_ability_list(self, db.QUICK_CTX, self.ability_list_offset)
else:
self.ability_list_offset = 0
self.chosen_ability = None
ABILITY_LIST_CACHE = None
await make_ability_list(self, db.QUICK_CTX, self.ability_list_offset)
if reaction.emoji == db.SWORDS:
self.enemy_list_offset = 0
await make_enemy_list(self, db.QUICK_CTX, self.enemy_list_offset)
if db.is_number_emoji(reaction.emoji):
if COMM_STATE == CommState.ATTACK:
self.enemy_list_offset -= 9
enemy_index = db.NUMBERS.index(reaction.emoji) + self.enemy_list_offset
target_ent = db.ENEMIES[enemy_index]
await self.gm.gm_attack(db.QUICK_CTX, who, target_ent.display_name(), who_ent.primary_weapon)
await suggest_quick_actions(db.QUICK_CTX, who_ent)
elif COMM_STATE == CommState.ABILITIES:
self.ability_list_offset -= 9
ability_index = db.NUMBERS.index(reaction.emoji) + self.ability_list_offset
abiObj = ABILITY_LIST_CACHE[ability_index]
self.chosen_ability = abiObj
if abiObj.is_spell:
await ask_cast_strength(db.QUICK_CTX)
else:
await self.gm.gm_use(db.QUICK_CTX, who, abiObj.name)
await suggest_quick_actions(db.QUICK_CTX, who_ent)
else:
raise ValueError("communication.on_reaction_add: CommState was None")
if reaction.emoji == db.FAST:
await self.gm.gm_cast(db.QUICK_CTX, who, 0, self.chosen_ability.name)
if reaction.emoji == db.MAGIC:
await self.gm.gm_cast(db.QUICK_CTX, who, 1, self.chosen_ability.name)
if reaction.emoji == db.POWERFUL:
await self.gm.gm_cast(db.QUICK_CTX, who, 2, self.chosen_ability.name)
if reaction.emoji == db.RUNNING:
success = await who_ent.use_resources_verbose(db.QUICK_CTX, {'A':1})
await db.QUICK_CTX.send(who_ent.display_name() + " moved.")
await suggest_quick_actions(db.QUICK_CTX, who_ent)
if reaction.emoji == db.REPEAT:
last_action = act.get_last_action(user=who_ent)
if last_action.type == act.ActionType.ABILITY:
await self.gm.gm_use(db.QUICK_CTX, who_ent.display_name(), last_action.description)
elif last_action.type == act.ActionType.SPELL:
await self.gm.gm_cast(db.QUICK_CTX, who_ent.display_name(), last_action.description)
else:
raise ValueError("communication.on_reaction_add: only ABILITY and SPELL action types are supported")
if reaction.emoji == db.SKIP:
await self.initCog.next_turn(db.QUICK_CTX)
if reaction.message == db.QUICK_REACTION_MESSAGE:
if self.initCog.whose_turn is None:
await db.QUICK_CTX.send("Cannot process reactions, I don't know whose turn it is.")
return
if meta.get_character_name(user) == self.initCog.whose_turn or meta.get_character_name(user) == "GM":
await self.remove_bot_reactions(reaction.message)
who = self.initCog.whose_turn
who_ent = db.find(who)
if reaction.emoji == db.DASH:
dodged_action = act.get_last_action(type=act.ActionType.ATTACK, target=who_ent)
who_ent.add_resources_verbose(db.QUICK_CTX, dodged_action.effects[act.ActionRole.TARGET])
who_ent.use_resources_verbose({'R':1})
if reaction.emoji == db.SHIELD:
blocked_action = act.get_last_action(type=act.ActionType.ATTACK, target=who_ent)
hp_lost = blocked_action.effects[act.ActionRole.TARGET]["HP"]
who_ent.add_resources_verbose(db.QUICK_CTX, {"HP": hp_lost, "VIM" : -1 * hp_lost})
who_ent.use_resources_verbose({'R':1})
```
#### File: joshmiller17/venntbot/sheets.py
```python
import discord
from discord.ext import commands
import os, pickle, time
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import importlib
db = importlib.import_module("db")
stats = importlib.import_module("stats")
meta = importlib.import_module("meta")
communication = importlib.import_module("communication")
logClass = importlib.import_module("logger")
logger = logClass.Logger("sheets")
# style: globals are in all caps
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
STATS = {
"AGI": "B2", "CHA": "B3", "DEX": "B4",
"INT": "B5", "PER": "B6", "SPI": "B7",
"STR": "B8", "TEK": "B9", "WIS": "B10",
"HP": "B14", "MAX_HP": "C14", "MP": "B17",
"VIM": "B16", "ARMOR": "B21", "HERO": "B15",
}
READ_ONLY_STATS = {
"INIT" : "B19",
"SPEED" : "B20"
}
"""
# Google Sheets API Login
creds = None
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('sheets', 'v4', credentials=creds)
sheet = service.spreadsheets()
"""
def get_sheet_id(char_name):
for character in db.characters:
if character["name"] == char_name:
return character["ID"]
logger.err("get_sheet_id", "no sheet found for " + char_name)
return ""
def get_from_sheets(spreadsheet_id, sheet_range):
result = sheet.values().get(spreadsheetId=spreadsheet_id,
range=sheet_range).execute()
values = result.get('values', [])
return values
def update_to_sheets(spreadsheet_id, sheet_range, vs):
logger.log("update_to_sheets", "updating {0} in {1} with {2}".format(sheet_range, spreadsheet_id, vs))
body = {'values' : vs}
result = service.spreadsheets().values().update(
spreadsheetId=spreadsheet_id, range=sheet_range,
valueInputOption='RAW', body=body).execute()
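# Note: vs must be a list of rows (each row a list of cell values), as the Sheets API
# expects for the 'values' body field; e.g. set_stat() below passes [[int(amount)]]
# to write a single cell.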
async def set_stat(ctx, who, amount, stat): # internal call of set command
logger.log("set_stat", "Set stat " + who + " " + str(amount) + " " + stat)
stat = stat.upper()
if stat not in STATS.keys():
await communication.send(ctx,"Unknown stat: " + stat)
return
cell = "Stats!" + STATS[stat]
update_to_sheets(get_sheet_id(who), cell, amount)
await ctx.message.add_reaction(db.OK)
async def do_get(ctx, who, stat):
if stat not in STATS.keys() and stat not in READ_ONLY_STATS.keys():
await communication.send(ctx,"Unknown stat: " + stat)
return
if stat in STATS.keys():
cell = "Stats!" + STATS[stat]
else:
cell = "Stats!" + READ_ONLY_STATS[stat]
return int(get_from_sheets(get_sheet_id(who), cell)[0][0])
async def do_get_abilities(ctx, who):
skills = get_from_sheets(get_sheet_id(who), "Skills!A7:A1000")
skills = [s[0] for s in skills if len(s) > 0] # de-listify
return skills
async def do_get_inventory(ctx, who):
i_sheet = get_from_sheets(get_sheet_id(who), "Inventory!A1:D1000")
inventory = [i for i in i_sheet if len(i) > 1 and i[2] != ""] # de-listify
return inventory
class Sheets(commands.Cog):
"""Commands to modify character sheets."""
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True)
async def set_sheet(self, ctx, who, amount, stat):
"""Set a stat on a character sheet. Players can only use 'me' or their name."""
char_name = meta.get_character_name(ctx.message.author)
if char_name != "GM" and who != "me" and who != char_name:
await ctx.message.add_reaction(db.NOT_OK)
return
amount = [[int(amount)]]
await set_stat(ctx, who, amount, stat)
@commands.command(pass_context=True, aliases=['mod_sheet'])
async def modify_sheet(self, ctx, who, amount, stat):
"""Modify a stat on a character sheet."""
char_name = meta.get_character_name(ctx.message.author)
if char_name != "GM" and who != "me" and who != char_name:
            await ctx.message.add_reaction(db.NOT_OK)
return
amount = stats.clean_modifier(amount)
        amount = [[amount + await do_get(ctx, who, stat)]]  # 2-D list, as the Sheets API expects
await set_stat(ctx, who, amount, stat)
@commands.command(pass_context=True, aliases=['get_sheet'])
async def read_sheet(self, ctx, who, stat):
"""See a stat on a character sheet."""
await communication.send(ctx, await do_get(ctx, who, stat) )
# Save characters.json -> Google Sheet
@commands.command(pass_context=True)
async def save(self, ctx, player):
"""Save changes to character sheet, or 'all' for everyone."""
if player == 'all':
for p in db.get_player_names():
await self.save(ctx, p)
else:
e = db.find(player)
for stat in STATS.keys():
await set_stat(ctx, player, getattr(e, stat), stat)
# TODO save primary weapon info somewhere?
# TODO save newly acquired skills
# Load Google Sheet -> db -> characters.json
@commands.command(pass_context=True)
async def load(self, ctx, who):
"""Load a character sheet, or 'all' for everyone (takes several minutes)."""
await ctx.message.add_reaction(db.THINKING)
if who == 'all':
for p in db.get_player_names():
logger.log("load", "Loading " + p)
await self.load(ctx, p)
time.sleep(60) # need to be extra nice to the server, this makes a lot of calls
else:
e = db.find(who)
# STATS
for stat in STATS.keys():
e.attrs[stat] = await do_get(ctx, who, stat)
time.sleep(1)
# READ ONLY STATS
for stat in READ_ONLY_STATS.keys():
e.attrs[stat] = await do_get(ctx, who, stat)
time.sleep(1)
# SKILLS
e.skills = await do_get_abilities(ctx, who)
# INVENTORY
e.inventory = await do_get_inventory(ctx, who)
e.write() # write db -> characters.json
await ctx.message.add_reaction(db.OK)
await ctx.message.remove_reaction(db.THINKING, ctx.me)
``` |
{
"source": "josh-minch/newsfeed",
"score": 2
} |
#### File: newsfeed/newsfeed/celery.py
```python
from __future__ import absolute_import, unicode_literals
import os
import platform
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'newsfeed.settings')
# We must set the following env var to support running tasks on Windows
# See https://github.com/celery/celery/issues/4081
if platform.system() == 'Windows':
os.environ.setdefault('FORKED_BY_MULTIPROCESSING', '1')
app = Celery('newsfeed')
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')
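# For example (illustrative setting): with namespace='CELERY', a Django setting
# such as CELERY_BROKER_URL = 'redis://localhost:6379/0' is read here as the
# Celery broker_url option.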
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
``` |
{
"source": "josh-minch/spartan",
"score": 2
} |
#### File: spartan/spartan/gui_helpers.py
```python
from PySide2.QtCore import Qt
from PySide2.QtGui import QFont
from PySide2.QtWidgets import QHeaderView
import gui_constants
def enumerate_cols(attrs):
col_to_attr, attr_to_col = {}, {}
for i, col_attr in enumerate(attrs):
col_to_attr[i] = col_attr
attr_to_col[col_attr] = i
return col_to_attr, attr_to_col
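# Example (derived from the function above):
#   enumerate_cols(['name', 'price']) == ({0: 'name', 1: 'price'},
#                                         {'name': 0, 'price': 1})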
def hide_view_cols(view, cols_to_hide):
for col in cols_to_hide:
view.setColumnHidden(col, True)
# by default, Qt doesn't change a table's header to reflect that of its body
def fix_header_font(table_view, font_size, font_weight):
font = QFont(gui_constants.system_font_family(), font_size, font_weight)
table_view.horizontalHeader().setFont(font)
def vertical_resize_table_view_to_contents(table_view):
height = 0
for i in range(table_view.verticalHeader().count()):
height += table_view.verticalHeader().sectionSize(i)
'''
if table_view.horizontalScrollBar().isHidden() == False:
height += table_view.horizontalScrollBar().height()
'''
if table_view.horizontalHeader().isHidden() == False:
height += table_view.horizontalHeader().height()
table_view.setMinimumHeight(height + 3)
def set_column_widths(view, cols, col_widths):
for col, width in zip(cols, col_widths):
view.setColumnWidth(col, width)
def set_header_font(table_view, size, weight):
font = QFont(gui_constants.system_font_family(),
size,
weight)
table_view.horizontalHeader().setFont(font)
def set_view_header_weights(view, weight):
    # Apply the requested font weight to the view's horizontal header in place.
    header = view.horizontalHeader()
    font = header.font()
    font.setWeight(weight)
    header.setFont(font)
def set_v_header_height(view, size):
v_header = view.verticalHeader()
v_header.setSectionResizeMode(QHeaderView.Fixed)
v_header.setDefaultSectionSize(size)
def setup_table_header(table, labels):
header = table.horizontalHeader()
header.setSectionResizeMode(0, QHeaderView.Stretch)
    # Column 0 stretches to fill the remaining width; the other columns are
    # sized to their contents, which is why setSectionResizeMode() is applied
    # per column below. If a single mode were wanted for every column, one
    # header-wide call such as header.setSectionResizeMode(QHeaderView.Stretch)
    # would replace the loop.
    # See https://stackoverflow.com/questions/18293403/columns-auto-resize-to-size-of-qtableview/34190094#34190094
for i in range(1, len(labels)):
header.setSectionResizeMode(i, QHeaderView.ResizeToContents)
header.setDefaultAlignment(Qt.AlignLeft)
header_font = QFont()
header_font.setWeight(QFont.DemiBold)
header.setFont(header_font)
```
#### File: spartan/spartan/main_window.py
```python
import ctypes
import sys
from timeit import default_timer as timer
from PySide2.QtCore import (QEvent, QModelIndex, QRegExp, QSettings,
QSortFilterProxyModel, Qt, Slot)
from PySide2.QtGui import QFont, QKeySequence, QPalette
from PySide2.QtWidgets import (
QAbstractItemView, QApplication, QDesktopWidget, QHeaderView, QListWidget,
QListWidgetItem, QMainWindow, QShortcut, QTableWidget, QTableWidgetItem)
from spartan import *
import database
import storage
from delegate.progress_bar_delegate import ProgressBarDelegate
from gui_constants import *
from gui_helpers import *
from model.fridge_model import FridgeModel
from model.nutrition_model import NutritionTableModel
from ui.ui_mainwindow import Ui_MainWindow
from view.combo_table_view import ComboTableView
from window.optimum_diet_window import OptimumDietWindow
from window.pref_window import PrefWindow
from window.search_window import SearchWindow
class MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self):
super().__init__()
self.setupUi(self)
self.blockSignals(True)
self.person = Person()
self.blockSignals(False)
self.type_res = Restriction(RESTRICT_TYPES_FILE)
self.fd_res = Restriction(RESTRICT_FDS_FILE)
self.setup_fridge_views()
self.setup_connections()
self.update_fridge_line_edit_placeholder()
self.add_foods_btn.setFocus()
self.display_empty_nutrition()
self.showMaximized()
self.read_settings()
self.show()
def update_fridge_line_edit_placeholder(self):
number = len(self.person.foods)
plural = '' if number == 1 else 's'
text = '🔍 Search fridge ({number} item{plural})'.format(
number=number, plural=plural)
self.fridge_line_edit.setPlaceholderText(text)
def fridge_line_edit_changed(self):
reg_exp = QRegExp(self.fridge_line_edit.text(), Qt.CaseInsensitive)
self.fridge_filter_model.setFilterRegExp(reg_exp)
def setup_fridge_views(self):
self.fridge_model = FridgeModel(foods=self.person.foods)
self.fridge_filter_model = QSortFilterProxyModel(self)
self.fridge_filter_model.setSourceModel(self.fridge_model)
self.fridge_view.setModel(self.fridge_filter_model)
self.prices_view.setModel(self.fridge_filter_model)
self.constraints_view.setModel(self.fridge_filter_model)
self.nut_quant_view.setModel(self.fridge_filter_model)
self.fridge_filter_model.setFilterKeyColumn(NAME_COL)
# Hide col
hide_view_cols(self.fridge_view, F_COLS_TO_HIDE)
hide_view_cols(self.prices_view, P_COLS_TO_HIDE)
hide_view_cols(self.constraints_view, C_COLS_TO_HIDE)
hide_view_cols(self.nut_quant_view, N_COLS_TO_HIDE)
# Header must be explicitly set to visible even if set in Designer
self.fridge_view.horizontalHeader().setVisible(True)
self.prices_view.horizontalHeader().setVisible(True)
self.constraints_view.horizontalHeader().setVisible(True)
self.nut_quant_view.horizontalHeader().setVisible(True)
# Set column width
self.prices_view.setColumnWidth(PRICE_COL, VALUE_COL_WIDTH)
self.prices_view.setColumnWidth(PER_COL, PER_COL_WIDTH)
self.prices_view.setColumnWidth(PRICE_QUANTITY_COL, VALUE_COL_WIDTH)
self.constraints_view.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.nut_quant_view.setColumnWidth(NUT_QUANT_COL, VALUE_COL_WIDTH)
# Set row height
set_v_header_height(self.fridge_view, FRIDGE_V_HEADER_SIZE)
set_v_header_height(self.prices_view, FRIDGE_V_HEADER_SIZE)
set_v_header_height(self.constraints_view, FRIDGE_V_HEADER_SIZE)
set_v_header_height(self.nut_quant_view, FRIDGE_V_HEADER_SIZE)
set_header_font(self.fridge_view, FONT_MAIN_SIZE, QFont.DemiBold)
set_header_font(self.prices_view, FONT_MAIN_SIZE, QFont.DemiBold)
set_header_font(self.constraints_view, FONT_MAIN_SIZE, QFont.DemiBold)
set_header_font(self.nut_quant_view, FONT_MAIN_SIZE, QFont.DemiBold)
# Set header fixed
self.fridge_view.horizontalHeader().setSectionResizeMode(QHeaderView.Fixed)
self.prices_view.horizontalHeader().setSectionResizeMode(QHeaderView.Fixed)
#self.constraints_view.horizontalHeader().setSectionResizeMode(QHeaderView.Fixed)
self.nut_quant_view.horizontalHeader().setSectionResizeMode(QHeaderView.Fixed)
# Hide fridge scrollbar
self.fridge_view.verticalScrollBar().setStyleSheet(
"QScrollBar {width:0px;}")
def display_empty_nutrition(self):
macros, vits, minerals = get_empty_nutrition(self.person)
macros_model = NutritionTableModel(nutrients=macros, nutrient_group='General')
vits_model = NutritionTableModel(nutrients=vits, nutrient_group='Vitamins')
minerals_model = NutritionTableModel(
nutrients=minerals, nutrient_group='Minerals')
self.setup_nutrition_view(self.macros_view, macros_model)
self.setup_nutrition_view(self.vits_view, vits_model)
self.setup_nutrition_view(self.minerals_view, minerals_model)
def display_nutrition(self):
selected_food_id_ixs = self.fridge_view.selectionModel().selectedRows()
if len(selected_food_id_ixs) == 0:
self.display_empty_nutrition()
return
selected_food_ids = [ix.data() for ix in selected_food_id_ixs]
selected_food_amounts = [ix.siblingAtColumn(
NUT_QUANT_COL).data() for ix in selected_food_id_ixs]
selected_food_units = [ix.siblingAtColumn(
NUT_QUANT_UNIT_COL).data() for ix in selected_food_id_ixs]
# Convert non-gram quantities to grams
for i, (unit, food_id, amount) in enumerate(zip(selected_food_units, selected_food_ids, selected_food_amounts)):
if unit != 'g':
converted_amount = convert_quantity(amount, unit)
selected_food_amounts[i] = converted_amount
macros, vits, minerals = get_nutrition(
self.person, selected_food_ids, selected_food_amounts)
macros_model = NutritionTableModel(nutrients=macros, nutrient_group='General')
vits_model = NutritionTableModel(nutrients=vits, nutrient_group='Vitamins')
minerals_model = NutritionTableModel(nutrients=minerals, nutrient_group='Minerals')
self.setup_nutrition_view(self.macros_view, macros_model)
self.setup_nutrition_view(self.vits_view, vits_model)
self.setup_nutrition_view(self.minerals_view, minerals_model)
def setup_nutrition_view(self, view, model):
view.setModel(model)
view.setItemDelegate(ProgressBarDelegate(self))
set_header_font(view, FONT_MAIN_SIZE, QFont.DemiBold)
set_column_widths(view, nut_col_to_attr.keys(), nut_col_widths)
view.horizontalHeader().setSectionResizeMode(QHeaderView.Fixed)
vertical_resize_table_view_to_contents(view)
def remove_from_fridge(self):
selected_food_id_indexes = self.fridge_view.selectionModel().selectedRows()
food_ids = []
for index in selected_food_id_indexes:
food_ids.append(index.data(Qt.DisplayRole))
row = selected_food_id_indexes[0].row()
count = selected_food_id_indexes[-1].row() - row + 1
self.fridge_filter_model.removeRows(row, count)
self.person.remove_foods_from_db(food_ids=food_ids)
def update_foods(self, index):
if index.column() != FOOD_ID_COL:
food = self.person.foods[index.row()]
self.person.update_food_in_user_db(food=food)
def open_search_window(self):
self.search_window = SearchWindow(self, self.person, self.fridge_model, self.type_res, self.fd_res)
self.search_window.setAttribute(Qt.WA_DeleteOnClose)
def open_pref(self):
self.pref_window = PrefWindow(
parent=self, person=self.person, type_res=self.type_res, fd_res=self.fd_res)
self.pref_window.setAttribute(Qt.WA_DeleteOnClose)
def optimize(self):
if len(self.person.foods) == 0:
return
self.optimum_diet_window = OptimumDietWindow(self.person, self.type_res, self.fd_res, self)
self.optimum_diet_window.setAttribute(Qt.WA_DeleteOnClose)
def toggle_remove_btn(self):
if self.fridge_view.selectionModel() is None:
self.remove_btn.setEnabled(False)
elif len(self.fridge_view.selectionModel().selectedRows()) > 0:
self.remove_btn.setEnabled(True)
else:
self.remove_btn.setEnabled(False)
def setup_connections(self):
self.fridge_model.dataChanged.connect(self.update_foods)
self.fridge_model.dataChanged.connect(self.display_nutrition)
self.fridge_view.selectionModel().selectionChanged.connect(self.display_nutrition)
# Update filtered view
self.fridge_line_edit.textChanged.connect(
self.fridge_line_edit_changed)
# Update fridge search placeholder
self.fridge_model.rowsInserted.connect(
self.update_fridge_line_edit_placeholder)
self.fridge_model.rowsRemoved.connect(
self.update_fridge_line_edit_placeholder)
# Synchronize fridge selection to prices and constraints
self.prices_view.setSelectionModel(self.fridge_view.selectionModel())
self.constraints_view.setSelectionModel(
self.fridge_view.selectionModel())
self.nut_quant_view.setSelectionModel(
self.fridge_view.selectionModel())
# Synchronize scrollbars
self.fridge_view.verticalScrollBar().valueChanged.connect(
self.prices_view.verticalScrollBar().setValue)
self.prices_view.verticalScrollBar().valueChanged.connect(
self.fridge_view.verticalScrollBar().setValue)
self.fridge_view.verticalScrollBar().valueChanged.connect(
self.constraints_view.verticalScrollBar().setValue)
self.constraints_view.verticalScrollBar().valueChanged.connect(
self.fridge_view.verticalScrollBar().setValue)
self.fridge_view.verticalScrollBar().valueChanged.connect(
self.nut_quant_view.verticalScrollBar().setValue)
self.nut_quant_view.verticalScrollBar().valueChanged.connect(
self.fridge_view.verticalScrollBar().setValue)
# Add to fridge button
self.add_foods_btn.clicked.connect(self.open_search_window)
# Remove button
self.remove_btn.clicked.connect(self.remove_from_fridge)
self.fridge_view.selectionModel().selectionChanged.connect(self.toggle_remove_btn)
# Optimize button
self.optimize_btn.clicked.connect(self.optimize)
# Settings button
self.pref_btn.clicked.connect(self.open_pref)
def closeEvent(self, event):
settings = QSettings("spartan", "spartan")
settings.setValue("geometry", self.saveGeometry())
settings.setValue("windowState", self.saveState())
super().closeEvent(event)
def read_settings(self):
settings = QSettings("spartan", "spartan")
self.restoreGeometry(settings.value("geometry"))
self.restoreState(settings.value("windowState"))
```
#### File: spartan/model/search_model.py
```python
from PySide2.QtCore import (Qt, QAbstractTableModel)
from constants import fd_grp_search_name
from gui_constants import Search
class SearchModel(QAbstractTableModel):
def __init__(self, search_result):
QAbstractTableModel.__init__(self)
self.search_result = search_result
def rowCount(self, parent):
return len(self.search_result)
def columnCount(self, parent):
return len(Search.attrs)
def data(self, index, role):
if not index.isValid() or not 0 <= index.row() < len(self.search_result):
return None
if role == Qt.DisplayRole:
value = self.search_result[index.row()][index.column()]
if index.column() == Search.attr_to_col['fd_grp']:
return fd_grp_search_name[value]
else:
return value
if role == Qt.ToolTipRole:
if index.column() == Search.attr_to_col['name']:
return self.search_result[index.row()][index.column()]
return None
```
#### File: spartan/test/test_spartan.py
```python
import unittest
import spartan
class TestFood(unittest.TestCase):
def test_get_selectable_units_without_amount_prefix(self):
food = spartan.Food(1001)
correct_units = [
'g', 'oz (28.35 g)', 'lb (453.6 g)', 'pat (1" sq, 1/3" high) (5 g)',
'tbsp (14.2 g)', 'cup (227 g)', 'stick (113 g)']
self.assertEqual(food.get_selectable_units(), correct_units)
def test_get_selectable_units_with_amount_prefix(self):
food = spartan.Food(1090)
correct_units = [
'g', 'oz (28.35 g)', 'lb (453.6 g)', '0.25 cup (32 g)', 'cup (128 g)']
self.assertEqual(food.get_selectable_units(), correct_units)
class TestSpartan(unittest.TestCase):
""" Test classless Spartan functions"""
def test_convert_quantity(self):
self.assertEqual(spartan.convert_quantity(
3.2, 'pat (1" sq, 1/3" high) (3.8 g)'), 12.16)
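        # i.e. 3.2 pats at 3.8 g per pat = 12.16 g, the quantity expressed in grams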
if __name__ == '__main__':
unittest.main()
```
#### File: spartan/spartan/welcome_window.py
```python
import sys
from PySide2.QtWidgets import QApplication, QMainWindow
import spartan
import storage
import main_window
from ui.ui_welcomewindow import Ui_WelcomeWindow
class WelcomeWindow(QMainWindow, Ui_WelcomeWindow):
def __init__(self, parent=None):
super().__init__(parent)
self.setupUi(self)
self.setup_connections()
self.show()
def accept(self):
if not self.req_wiz_widget.fields_are_valid():
return
storage.create_spartan_db()
sex = self.req_wiz_widget.sex
year = self.req_wiz_widget.bd_year
mon = self.req_wiz_widget.bd_mon
day = self.req_wiz_widget.bd_day
spartan.update_sex_bd_in_db(sex, year, mon, day)
macro_nuts = self.req_wiz_widget.macro_model.nutrients
vit_nuts = self.req_wiz_widget.vit_model.nutrients
mineral_nuts = self.req_wiz_widget.mineral_model.nutrients
nutrients = macro_nuts + vit_nuts + mineral_nuts
spartan.update_nuts_in_db(nutrients)
self.update_wel_check()
self.run_main_window()
self.hide()
def update_wel_check(self):
with open('run_wel_check.csv', 'w') as check_file:
check_file.write('skip')
def run_main_window(self):
self.main_window = main_window.MainWindow()
self.main_window.show()
def reject(self):
self.close()
def setup_connections(self):
self.button_box.accepted.connect(self.accept)
self.button_box.rejected.connect(self.reject)
if __name__ == "__main__":
app = QApplication(sys.argv)
welcome_window = WelcomeWindow()
sys.exit(app.exec_())
```
#### File: spartan/widget/req_wiz_widget.py
```python
import sys
import datetime
from PySide2.QtCore import Qt
from PySide2.QtGui import (QColor, QFont, QIntValidator, QKeySequence,
                           QPalette, QPixmap)
from PySide2.QtWidgets import (QApplication, QWidget, QStyleFactory, QDialog,
                               QShortcut, QHeaderView, QListView,
                               QStyledItemDelegate)
from spartan import *
import req
from gui_constants import *
import gui_helpers
from model.requirements_model import RequirementsModel
from delegate.lineedit_delegate import LineEditDelegate
from ui.ui_reqwizwidget import Ui_ReqWizWidget
class ReqWizWidget(QWidget, Ui_ReqWizWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.setupUi(self)
self.setup_connections()
self.set_validators()
self.set_defaults()
self.init_req()
self.sex_edit.setView(QListView())
def set_defaults(self):
self.bd_year, self.bd_mon, self.bd_day = None, None, None
self.sex = 'f'
def set_validators(self):
int_validator = QIntValidator()
self.day_edit.setValidator(int_validator)
self.mon_edit.setValidator(int_validator)
self.year_edit.setValidator(int_validator)
def init_req(self):
self.macro, self.vit, self.mineral = req.get_empty_reqs()
self.display_req()
def update_displayed_req(self):
if not self.fields_are_valid():
return
self.age_range = req.calculate_age_range(
self.bd_year, self.bd_mon, self.bd_day)
self.macro, self.vit, self.mineral = req.get_reqs(
self.age_range, self.sex)
self.display_req()
def display_req(self):
self.macro_model = RequirementsModel(
nutrients=self.macro, nutrient_group='General')
self.vit_model = RequirementsModel(
nutrients=self.vit, nutrient_group='Vitamins')
self.mineral_model = RequirementsModel(
nutrients=self.mineral, nutrient_group='Minerals')
self.macro_view.setModel(self.macro_model)
self.vit_view.setModel(self.vit_model)
self.mineral_view.setModel(self.mineral_model)
self.setup_req_view(self.macro_view)
self.setup_req_view(self.vit_view)
self.setup_req_view(self.mineral_view)
def setup_req_view(self, view):
gui_helpers.hide_view_cols(view, [Req.attr_to_col['nut_id']])
view.setColumnWidth(Req.attr_to_col['name'], 150)
gui_helpers.vertical_resize_table_view_to_contents(view)
gui_helpers.set_header_font(view, FONT_SECONDARY_SIZE, QFont.DemiBold)
def fields_are_valid(self):
if None in (self.bd_day, self.bd_mon, self.bd_year):
return False
if not self.valid_date():
return False
if len(str(self.bd_year)) < 4:
return False
return True
def valid_date(self):
try:
datetime.datetime(self.bd_year, self.bd_mon, self.bd_day)
date_validity = True
except ValueError:
date_validity = False
return date_validity
def day_edit_changed(self, day):
self.bd_day = int(day)
def mon_edit_changed(self, mon):
self.bd_mon = int(mon)
def year_edit_changed(self, year):
self.bd_year = int(year)
def sex_edit_changed(self, index):
self.sex = index_to_sex[index]
def setup_connections(self):
self.day_edit.textChanged.connect(self.day_edit_changed)
self.mon_edit.textChanged.connect(self.mon_edit_changed)
self.year_edit.textChanged.connect(self.year_edit_changed)
self.sex_edit.currentIndexChanged[int].connect(self.sex_edit_changed)
self.day_edit.textChanged.connect(self.update_displayed_req)
self.mon_edit.textChanged.connect(self.update_displayed_req)
self.year_edit.textChanged.connect(self.update_displayed_req)
self.sex_edit.currentIndexChanged[int].connect(self.update_displayed_req)
# Debug
debug_shortcut = QShortcut(QKeySequence(Qt.Key_F1), self)
debug_shortcut.activated.connect(self.print_debug_info)
def print_debug_info(self):
        print(self.macro, self.vit, self.mineral)
``` |
{
"source": "joshmoore/ansible-role-ice",
"score": 2
} |
#### File: ice36all/tests/test_default.py
```python
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_ice_version(host):
assert host.exists('icegridnode')
c = host.run('icegridnode --version')
assert c.rc == 0
assert c.stderr.startswith('3.6.')
def test_icepy_version(host):
c = host.run('python -c "import Ice; print Ice.stringVersion()"')
assert c.stdout.startswith('3.6.')
def test_ice_devel(host):
assert host.package('ice-all-devel').is_installed
``` |
{
"source": "joshmoore/ansible-role-munin",
"score": 2
} |
#### File: default/tests/test_default.py
```python
from glob import glob
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_services_running_and_enabled(host):
service = host.service('munin-node')
assert service.is_running
assert service.is_enabled
# Munin relies on cron to gather stats at regular intervals, but cron isn't
# enabled by default in docker, and in any case we don't want to wait.
# Instead delete the generated files and manually run an update
def test_gather_stats(host):
for f in glob('/var/www/html/munin/*.html'):
os.remove(f)
with host.sudo('munin'):
out = host.check_output('/usr/bin/munin-cron')
assert len(out) == 0
assert host.file('/var/www/html/munin/system-day.html').exists
``` |
{
"source": "joshmoore/ansible-role-omero-web",
"score": 2
} |
#### File: active/tests/test_default.py
```python
import os
import pytest
import re
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
OMERO = '/opt/omero/web/OMERO.web/bin/omero'
VERSION_PATTERN = re.compile(r'(\d+)\.(\d+)\.(\d+)-ice36-')
def test_omero_web_config(host):
with host.sudo('omero-web'):
cfg = host.check_output("%s config get" % OMERO)
assert cfg == (
'omero.web.server_list=[["localhost", 12345, "molecule-test"]]')
def test_omero_version(host):
with host.sudo('omero-web'):
ver = host.check_output("%s version" % OMERO)
m = VERSION_PATTERN.match(ver)
assert m is not None
assert int(m.group(1)) >= 5
assert int(m.group(2)) > 3
@pytest.mark.parametrize("name", ["omero-web", "nginx"])
def test_services_running_and_enabled(host, name):
service = host.service(name)
assert service.is_running
assert service.is_enabled
def test_nginx_gateway(host):
out = host.check_output('curl -L localhost')
assert 'OMERO.web - Login' in out
def test_omero_web_config_applied(host):
out = host.check_output('curl -L localhost')
assert 'molecule-test:12345' in out
``` |
{
"source": "joshmoore/ansible-role-prometheus-node",
"score": 2
} |
#### File: default/tests/test_default.py
```python
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_services_running_and_enabled(host):
assert host.service('prometheus-node-exporter').is_running
assert host.service('prometheus-node-exporter').is_enabled
def test_node_exporter_metrics(host):
out = host.check_output('curl http://localhost:19100/metrics')
assert 'process_cpu_seconds_total' in out
``` |
{
"source": "joshmoore/ansible-role-rsync-server",
"score": 2
} |
#### File: default/tests/test_default.py
```python
import os
import re
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_rsync_running(host):
assert host.service('rsyncd').is_running
assert host.service('rsyncd').is_enabled
def test_connect_rsync(host):
out = host.check_output('rsync rsync://localhost')
assert re.match(r'dataset-1\s+Public datasets', out)
``` |
{
"source": "joshmoore/napari-ome-zarr",
"score": 2
} |
#### File: napari-ome-zarr/napari_ome_zarr/_reader.py
```python
import logging
import warnings
from typing import Any, Callable, Dict, Iterator, List, Optional
from ome_zarr.data import CHANNEL_DIMENSION
from ome_zarr.io import parse_url
from ome_zarr.reader import Label, Node, Reader
from ome_zarr.types import LayerData, PathLike, ReaderFunction
try:
from napari_plugin_engine import napari_hook_implementation
except ImportError:
def napari_hook_implementation(
func: Callable, *args: Any, **kwargs: Any
) -> Callable:
return func
LOGGER = logging.getLogger("ome_zarr.napari")
@napari_hook_implementation
def napari_get_reader(path: PathLike) -> Optional[ReaderFunction]:
"""Returns a reader for supported paths that include IDR ID.
- URL of the form: https://s3.embassy.ebi.ac.uk/idr/zarr/v0.1/ID.zarr/
"""
if isinstance(path, list):
if len(path) > 1:
warnings.warn("more than one path is not currently supported")
path = path[0]
zarr = parse_url(path)
if zarr:
reader = Reader(zarr)
return transform(reader())
# Ignoring this path
return None
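# Minimal usage sketch (hypothetical path): the hook returns None for
# unsupported paths, otherwise a callable that yields napari LayerData tuples:
#   reader = napari_get_reader("example.ome.zarr")
#   layers = reader("example.ome.zarr") if reader is not None else []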
def transform(nodes: Iterator[Node]) -> Optional[ReaderFunction]:
def f(*args: Any, **kwargs: Any) -> List[LayerData]:
results: List[LayerData] = list()
for node in nodes:
data: List[Any] = node.data
metadata: Dict[str, Any] = node.metadata
if data is None or len(data) < 1:
LOGGER.debug(f"skipping non-data {node}")
else:
LOGGER.debug(f"transforming {node}")
shape = data[0].shape
layer_type: str = "image"
if node.load(Label):
layer_type = "labels"
if "colormap" in metadata:
del metadata["colormap"]
elif shape[CHANNEL_DIMENSION] > 1:
metadata["channel_axis"] = CHANNEL_DIMENSION
else:
for x in ("name", "visible", "contrast_limits", "colormap"):
if x in metadata:
try:
metadata[x] = metadata[x][0]
except Exception:
del metadata[x]
rv: LayerData = (data, metadata, layer_type)
LOGGER.debug(f"Transformed: {rv}")
results.append(rv)
return results
return f
``` |
{
"source": "joshmoore/PyTables",
"score": 3
} |
#### File: PyTables/bench/LRU-experiments.py
```python
from time import time
from tables import *
import tables
print "PyTables version-->", tables.__version__
filename = "/tmp/junk-tables-100.h5"
NLEAVES = 2000
NROWS = 1000
class Particle(IsDescription):
name = StringCol(16, pos=1) # 16-character String
lati = Int32Col(pos=2) # integer
longi = Int32Col(pos=3) # integer
pressure = Float32Col(pos=4) # float (single-precision)
temperature = Float64Col(pos=5) # double (double-precision)
def create_junk():
# Open a file in "w"rite mode
fileh = openFile(filename, mode = "w")
# Create a new group
group = fileh.createGroup(fileh.root, "newgroup")
for i in xrange(NLEAVES):
# Create a new table in newgroup group
table = fileh.createTable(group, 'table'+str(i), Particle,
"A table", Filters(1))
particle = table.row
print "Creating table-->", table._v_name
# Fill the table with particles
for i in xrange(NROWS):
# This injects the row values.
particle.append()
table.flush()
# Finally, close the file
fileh.close()
def modify_junk_LRU():
fileh = openFile(filename,'a')
group = fileh.root.newgroup
for j in range(5):
print "iter -->", j
for tt in fileh.walkNodes(group):
if isinstance(tt,Table):
pass
# for row in tt:
# pass
fileh.close()
def modify_junk_LRU2():
fileh = openFile(filename,'a')
group = fileh.root.newgroup
for j in range(20):
t1 = time()
for i in range(100):
# print "table-->", tt._v_name
tt = getattr(group,"table"+str(i))
# for row in tt:
# pass
print "iter and time -->", j+1, round(time()-t1,3)
fileh.close()
def modify_junk_LRU3():
fileh = openFile(filename,'a')
group = fileh.root.newgroup
for j in range(3):
t1 = time()
for tt in fileh.walkNodes(group, "Table"):
title = tt.attrs.TITLE
for row in tt:
pass
print "iter and time -->", j+1, round(time()-t1,3)
fileh.close()
if 1:
#create_junk()
#modify_junk_LRU() # uses the iterator version (walkNodes)
#modify_junk_LRU2() # uses a regular loop (getattr)
modify_junk_LRU3() # uses a regular loop (getattr)
else:
import profile, pstats
profile.run('modify_junk_LRU2()', 'modify.prof')
stats = pstats.Stats('modify.prof')
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats()
```
#### File: doc/scripts/tutorial1.py
```python
import os, sys, traceback
SECTION = "I HAVE NO TITLE"
def tutsep():
print '----8<----', SECTION, '----8<----'
def tutprint(obj):
tutsep()
print obj
def tutrepr(obj):
tutsep()
print repr(obj)
def tutexc():
tutsep()
traceback.print_exc(file=sys.stdout)
SECTION = "Importing tables objects"
from numpy import *
from tables import *
SECTION = "Declaring a Column Descriptor"
# Define a user record to characterize some kind of particles
class Particle(IsDescription):
name = StringCol(16) # 16-character String
idnumber = Int64Col() # Signed 64-bit integer
ADCcount = UInt16Col() # Unsigned short integer
TDCcount = UInt8Col() # unsigned byte
grid_i = Int32Col() # integer
grid_j = IntCol() # integer (equivalent to Int32Col)
pressure = Float32Col() # float (single-precision)
energy = FloatCol() # double (double-precision)
SECTION = "Creating a PyTables file from scratch"
# Open a file in "w"rite mode
h5file = openFile('tutorial1.h5', mode = "w", title = "Test file")
SECTION = "Creating a new group"
# Create a new group under "/" (root)
group = h5file.createGroup("/", 'detector', 'Detector information')
SECTION = "Creating a new table"
# Create one table on it
table = h5file.createTable(group, 'readout', Particle, "Readout example")
tutprint(h5file)
tutrepr(h5file)
# Get a shortcut to the record object in table
particle = table.row
# Fill the table with 10 particles
for i in xrange(10):
particle['name'] = 'Particle: %6d' % (i)
particle['TDCcount'] = i % 256
particle['ADCcount'] = (i * 256) % (1 << 16)
particle['grid_i'] = i
particle['grid_j'] = 10 - i
particle['pressure'] = float(i*i)
particle['energy'] = float(particle['pressure'] ** 4)
particle['idnumber'] = i * (2 ** 34)
# Insert a new particle record
particle.append()
# Flush the buffers for table
table.flush()
SECTION = "Reading (and selecting) data in a table"
# Read actual data from table. We are interested in collecting pressure values
# on entries where TDCcount field is greater than 3 and pressure less than 50
table = h5file.root.detector.readout
pressure = [ x['pressure'] for x in table
if x['TDCcount']>3 and 20<=x['pressure']<50 ]
tutrepr(pressure)
# Read also the names with the same cuts
names = [ x['name'] for x in table
if x['TDCcount'] > 3 and 20 <= x['pressure'] < 50 ]
tutrepr(names)
SECTION = "Creating new array objects"
gcolumns = h5file.createGroup(h5file.root, "columns", "Pressure and Name")
tutrepr(
h5file.createArray(gcolumns, 'pressure', array(pressure),
"Pressure column selection")
)
tutrepr(
h5file.createArray('/columns', 'name', names, "Name column selection")
)
tutprint(h5file)
SECTION = "Closing the file and looking at its content"
# Close the file
h5file.close()
tutsep()
os.system('h5ls -rd tutorial1.h5')
tutsep()
os.system('ptdump tutorial1.h5')
"""This example shows how to browse the object tree and enlarge tables.
Before to run this program you need to execute first tutorial1-1.py
that create the tutorial1.h5 file needed here.
"""
SECTION = "Traversing the object tree"
# Reopen the file in append mode
h5file = openFile("tutorial1.h5", "a")
# Print the object tree created from this filename
# List all the nodes (Group and Leaf objects) on tree
tutprint(h5file)
# List all the nodes (using File iterator) on tree
tutsep()
for node in h5file:
print node
# Now, only list all the groups on tree
tutsep()
for group in h5file.walkGroups("/"):
print group
# List only the arrays hanging from /
tutsep()
for group in h5file.walkGroups("/"):
for array in h5file.listNodes(group, classname = 'Array'):
print array
# This gives the same result
tutsep()
for array in h5file.walkNodes("/", "Array"):
print array
# And finally, list only leafs on /detector group (there should be one!)
# Other way using iterators and natural naming
tutsep()
for leaf in h5file.root.detector('Leaf'):
print leaf
SECTION = "Setting and getting user attributes"
# Get a pointer to '/detector/readout'
table = h5file.root.detector.readout
# Attach it a string (date) attribute
table.attrs.gath_date = "Wed, 06/12/2003 18:33"
# Attach a floating point attribute
table.attrs.temperature = 18.4
table.attrs.temp_scale = "Celsius"
# Get a pointer to '/detector'
detector = h5file.root.detector
# Attach a general object to the parent (/detector) group
detector._v_attrs.stuff = [5, (2.3, 4.5), "Integer and tuple"]
# Now, get the attributes
tutrepr(table.attrs.gath_date)
tutrepr(table.attrs.temperature)
tutrepr(table.attrs.temp_scale)
tutrepr(detector._v_attrs.stuff)
# Delete permanently the attribute gath_date of /detector/readout
del table.attrs.gath_date
# Print a representation of all attributes in /detector/table
tutrepr(table.attrs)
# Get the (user) attributes of /detector/table
tutprint(table.attrs._f_list("user"))
# Get the (sys) attributes of /detector/table
tutprint(table.attrs._f_list("sys"))
# Rename an attribute
table.attrs._f_rename("temp_scale","tempScale")
tutprint(table.attrs._f_list())
# Try to rename a system attribute:
try:
table.attrs._f_rename("VERSION", "version")
except:
tutexc()
h5file.flush()
tutsep()
os.system('h5ls -vr tutorial1.h5/detector/readout')
SECTION = "Getting object metadata"
# Get metadata from table
tutsep()
print "Object:", table
tutsep()
print "Table name:", table.name
tutsep()
print "Table title:", table.title
tutsep()
print "Number of rows in table:", table.nrows
tutsep()
print "Table variable names with their type and shape:"
tutsep()
for name in table.colnames:
print name, ':= %s, %s' % (table.coltypes[name], table.colshapes[name])
tutprint(table.__doc__)
# Get the object in "/columns pressure"
pressureObject = h5file.getNode("/columns", "pressure")
# Get some metadata on this object
tutsep()
print "Info on the object:", repr(pressureObject)
tutsep()
print " shape: ==>", pressureObject.shape
tutsep()
print " title: ==>", pressureObject.title
tutsep()
print " type: ==>", pressureObject.type
SECTION = "Reading data from Array objects"
# Read the 'pressure' actual data
pressureArray = pressureObject.read()
tutrepr(pressureArray)
tutsep()
print "pressureArray is an object of type:", type(pressureArray)
# Read the 'name' Array actual data
nameArray = h5file.root.columns.name.read()
tutrepr(nameArray)
print "nameArray is an object of type:", type(nameArray)
# Print the data for both arrays
tutprint("Data on arrays nameArray and pressureArray:")
tutsep()
for i in range(pressureObject.shape[0]):
print nameArray[i], "-->", pressureArray[i]
tutrepr(pressureObject.name)
SECTION = "Appending data to an existing table"
# Create a shortcut to table object
table = h5file.root.detector.readout
# Get the object row from table
particle = table.row
# Append 5 new particles to table
for i in xrange(10, 15):
particle['name'] = 'Particle: %6d' % (i)
particle['TDCcount'] = i % 256
particle['ADCcount'] = (i * 256) % (1 << 16)
particle['grid_i'] = i
particle['grid_j'] = 10 - i
particle['pressure'] = float(i*i)
particle['energy'] = float(particle['pressure'] ** 4)
particle['idnumber'] = i * (2 ** 34) # This exceeds long integer range
particle.append()
# Flush this table
table.flush()
# Print the data using the table iterator:
tutsep()
for r in table:
print "%-16s | %11.1f | %11.4g | %6d | %6d | %8d |" % \
(r['name'], r['pressure'], r['energy'], r['grid_i'], r['grid_j'],
r['TDCcount'])
# Delete some rows on the Table (yes, rows can be removed!)
tutrepr(table.removeRows(5,10))
# Close the file
h5file.close()
```
#### File: PyTables/examples/undo-redo.py
```python
import tables
def setUp(filename):
# Create an HDF5 file
fileh = tables.openFile(filename, mode = "w", title="Undo/Redo demo")
# Create some nodes in there
fileh.createGroup("/", "agroup", "Group 1")
fileh.createGroup("/agroup", "agroup2", "Group 2")
fileh.createArray("/", "anarray", [1,2], "Array 1")
# Enable undo/redo.
fileh.enableUndo()
return fileh
def tearDown(fileh):
# Disable undo/redo.
fileh.disableUndo()
# Close the file
fileh.close()
def demo_6times3marks():
"""Checking with six ops and three marks"""
# Initialize the data base with some nodes
fileh = setUp("undo-redo-6times3marks.h5")
# Create a new array
fileh.createArray('/', 'otherarray1', [3,4], "Another array 1")
fileh.createArray('/', 'otherarray2', [4,5], "Another array 2")
# Put a mark
fileh.mark()
fileh.createArray('/', 'otherarray3', [5,6], "Another array 3")
fileh.createArray('/', 'otherarray4', [6,7], "Another array 4")
# Put a mark
fileh.mark()
fileh.createArray('/', 'otherarray5', [7,8], "Another array 5")
fileh.createArray('/', 'otherarray6', [8,9], "Another array 6")
# Unwind just one mark
fileh.undo()
assert "/otherarray1" in fileh
assert "/otherarray2" in fileh
assert "/otherarray3" in fileh
assert "/otherarray4" in fileh
assert "/otherarray5" not in fileh
assert "/otherarray6" not in fileh
# Unwind another mark
fileh.undo()
assert "/otherarray1" in fileh
assert "/otherarray2" in fileh
assert "/otherarray3" not in fileh
assert "/otherarray4" not in fileh
assert "/otherarray5" not in fileh
assert "/otherarray6" not in fileh
# Unwind all marks
fileh.undo()
assert "/otherarray1" not in fileh
assert "/otherarray2" not in fileh
assert "/otherarray3" not in fileh
assert "/otherarray4" not in fileh
assert "/otherarray5" not in fileh
assert "/otherarray6" not in fileh
# Redo until the next mark
fileh.redo()
assert "/otherarray1" in fileh
assert "/otherarray2" in fileh
assert "/otherarray3" not in fileh
assert "/otherarray4" not in fileh
assert "/otherarray5" not in fileh
assert "/otherarray6" not in fileh
# Redo until the next mark
fileh.redo()
assert "/otherarray1" in fileh
assert "/otherarray2" in fileh
assert "/otherarray3" in fileh
assert "/otherarray4" in fileh
assert "/otherarray5" not in fileh
assert "/otherarray6" not in fileh
# Redo until the end
fileh.redo()
assert "/otherarray1" in fileh
assert "/otherarray2" in fileh
assert "/otherarray3" in fileh
assert "/otherarray4" in fileh
assert "/otherarray5" in fileh
assert "/otherarray6" in fileh
# Tear down the file
tearDown(fileh)
def demo_manyops():
"""Checking many operations together """
# Initialize the data base with some nodes
fileh = setUp("undo-redo-manyops.h5")
# Create an array
array2 = fileh.createArray(fileh.root, 'anarray3',
[3], "Array title 3")
# Create a group
array2 = fileh.createGroup(fileh.root, 'agroup3',
"Group title 3")
# /anarray => /agroup/agroup3/
newNode = fileh.copyNode('/anarray3', '/agroup/agroup2')
newNode = fileh.copyChildren('/agroup', '/agroup3', recursive=1)
# rename anarray
array4 = fileh.renameNode('/anarray', 'anarray4')
# Move anarray
newNode = fileh.copyNode('/anarray3', '/agroup')
# Remove anarray4
fileh.removeNode('/anarray4')
# Undo the actions
fileh.undo()
assert '/anarray4' not in fileh
assert '/anarray3' not in fileh
assert '/agroup/agroup2/anarray3' not in fileh
assert '/agroup3' not in fileh
assert '/anarray4' not in fileh
assert '/anarray' in fileh
# Redo the actions
fileh.redo()
# Check that the copied node exists again in the object tree.
assert '/agroup/agroup2/anarray3' in fileh
assert '/agroup/anarray3' in fileh
assert '/agroup3/agroup2/anarray3' in fileh
assert '/agroup3/anarray3' not in fileh
assert fileh.root.agroup.anarray3 is newNode
assert '/anarray' not in fileh
assert '/anarray4' not in fileh
# Tear down the file
tearDown(fileh)
if __name__ == '__main__':
# run demos
demo_6times3marks()
demo_manyops()
```
#### File: PyTables/tables/link.py
```python
import os
import tables as t
from tables import linkExtension
from tables.node import Node
from tables.utils import lazyattr
from tables.attributeset import AttributeSet
import tables.file
try:
from tables.linkExtension import ExternalLink
except ImportError:
are_extlinks_available = False
else:
are_extlinks_available = True
__version__ = "$Revision$"
def _g_getLinkClass(parent_id, name):
"""Guess the link class."""
return linkExtension._getLinkClass(parent_id, name)
class Link(Node):
"""Abstract base class for all PyTables links.
A link is a node that refers to another node. The `Link` class
inherits from `Node` class and the links that inherits from `Link`
are `SoftLink` and `ExternalLink`. There is not a `HardLink`
subclass because hard links behave like a regular `Group` or `Leaf`.
Contrarily to other nodes, links cannot have HDF5 attributes. This
is an HDF5 library limitation that might be solved in future
releases.
"""
# Properties
@lazyattr
def _v_attrs(self):
"""A `NoAttrs` instance replacing the typical `AttributeSet`
instance of other node objects. The purpose of `NoAttrs` is to
make clear that HDF5 attributes are not supported in link nodes.
"""
class NoAttrs(AttributeSet):
def __getattr__(self, name):
raise KeyError("you cannot get attributes from this "
"`%s` instance" % self.__class__.__name__)
def __setattr__(self, name, value):
raise KeyError("you cannot set attributes to this "
"`%s` instance" % self.__class__.__name__)
def _g_close(self):
pass
return NoAttrs(self)
def __init__(self, parentNode, name, target=None, _log = False):
self._v_new = target is not None
self.target = target
"""The path string to the pointed node."""
super(Link, self).__init__(parentNode, name, _log)
# Public and tailored versions for copy, move, rename and remove methods
def copy(self, newparent=None, newname=None,
overwrite=False, createparents=False):
"""Copy this link and return the new one.
See `Node._f_copy` for a complete explanation of the arguments.
Please note that there is no `recursive` flag since links do not
have child nodes.
"""
newnode = self._f_copy(newparent=newparent, newname=newname,
overwrite=overwrite, createparents=createparents)
# Insert references to a `newnode` via `newname`
newnode._v_parent._g_refNode(newnode, newname, True)
return newnode
def move(self, newparent=None, newname=None, overwrite=False):
"""Move or rename this link.
See `Node._f_move` for a complete explanation of the arguments.
"""
return self._f_move(newparent=newparent, newname=newname,
overwrite=overwrite)
def remove(self):
"""Remove this link from the hierarchy.
"""
return self._f_remove()
def rename(self, newname=None, overwrite=False):
"""Rename this link in place.
See `Node._f_rename` for a complete explanation of the arguments.
"""
return self._f_rename(newname=newname, overwrite=overwrite)
def __repr__(self):
return str(self)
class SoftLink(linkExtension.SoftLink, Link):
"""Represents a soft link (aka symbolic link).
A soft link is a reference to another node in the *same* file
hierarchy. Getting access to the pointed node (this action is
called *dereferencing*) is done via the `__call__` special method (see
below).
"""
# Class identifier.
_c_classId = 'SOFTLINK'
def __call__(self):
"""
Dereference `self.target` and return the object.
Example of use::
>>> f=tables.openFile('data/test.h5')
>>> print f.root.link0
/link0 (SoftLink) -> /another/path
>>> print f.root.link0()
/another/path (Group) ''
"""
target = self.target
# Check for relative pathnames
if not self.target.startswith('/'):
target = self._v_parent._g_join(self.target)
return self._v_file._getNode(target)
def __str__(self):
"""
Return a short string representation of the link.
Example of use::
>>> f=tables.openFile('data/test.h5')
>>> print f.root.link0
/link0 (SoftLink) -> /path/to/node
"""
classname = self.__class__.__name__
target = self.target
# Check for relative pathnames
if not self.target.startswith('/'):
target = self._v_parent._g_join(self.target)
if target in self._v_file:
dangling = ""
else:
dangling = " (dangling)"
return "%s (%s) -> %s%s" % (self._v_pathname, classname,
self.target, dangling)
if are_extlinks_available:
# Declare this only if the extension is available
class ExternalLink(linkExtension.ExternalLink, Link):
"""Represents an external link.
An external link is a reference to a node in *another* file.
Getting access to the pointed node (this action is called
*dereferencing*) is done via the `__call__` special method (see
below).
.. Warning:: External links are only supported when PyTables is
compiled against HDF5 1.8.x series. When using PyTables with
HDF5 1.6.x, the *parent* group containing external link
objects will be mapped to an `Unknown` instance and you won't
be able to access *any* node hanging of this parent group.
It follows that if the parent group containing the external
link is the root group, you won't be able to read *any*
information contained in the file when using HDF5 1.6.x.
"""
# Class identifier.
_c_classId = 'EXTERNALLINK'
def __init__(self, parentNode, name, target=None, _log = False):
self.extfile = None
"""The external file handler, if the link has been
dereferenced. In case the link has not been dereferenced
yet, its value is None."""
super(ExternalLink, self).__init__(parentNode, name, target, _log)
def _get_filename_node(self):
"""Return the external filename and nodepath from `self.target`."""
# This is needed for avoiding the 'C:\\file.h5' filepath notation
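            # e.g. a target of 'data2/test2.h5:/path/to/node' splits into
            # ('data2/test2.h5', '/path/to/node')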
filename, target = self.target.split(':/')
return filename, '/'+target
def __call__(self, **kwargs):
"""
Dereference `self.target` and return the object.
You can pass all the arguments (except `filename`, of course)
supported by the `openFile()` function so as to open the
referenced external file.
Example of use::
>>> f=tables.openFile('data1/test1.h5')
>>> print f.root.link2
/link2 (ExternalLink) -> data2/test2.h5:/path/to/node
>>> plink2 = f.root.link2('a') # open in 'a'ppend mode
>>> print plink2
/path/to/node (Group) ''
>>> print plink2._v_filename
'data2/test2.h5' # belongs to referenced file
"""
filename, target = self._get_filename_node()
if not os.path.isabs(filename):
# Resolve the external link with respect to the this
# file's directory. See #306.
base_directory = os.path.dirname(self._v_file.filename)
filename = os.path.join(base_directory, filename)
# Fetch the external file and save a reference to it.
# Check first in already opened files.
open_files = tables.file._open_files
if filename in open_files:
self.extfile = open_files[filename]
else:
self.extfile = t.openFile(filename, **kwargs)
return self.extfile._getNode(target)
def umount(self):
"""Safely unmount `self.extfile`, if opened."""
extfile = self.extfile
# Close external file, if open
if extfile is not None and extfile.isopen:
extfile.close()
self.extfile = None
def _f_close(self):
"""Especific close for external links."""
self.umount()
super(ExternalLink, self)._f_close()
def __str__(self):
"""
Return a short string representation of the link.
Example of use::
>>> f=tables.openFile('data1/test1.h5')
>>> print f.root.link2
/link2 (ExternalLink) -> data2/test2.h5:/path/to/node
"""
classname = self.__class__.__name__
return "%s (%s) -> %s" % (self._v_pathname, classname, self.target)
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## fill-column: 72
## End:
```
#### File: tables/scripts/ptrepack.py
```python
import sys
import os.path
import time
import getopt
import warnings
from tables.file import openFile
from tables.group import Group
from tables.leaf import Filters
from tables.exceptions import OldIndexWarning, NoSuchNodeError, FlavorWarning
# Global variables
verbose = False
regoldindexes = True
createsysattrs = True
def newdstGroup(dstfileh, dstgroup, title, filters):
group = dstfileh.root
# Now, create the new group. This works even if dstgroup == '/'
for nodeName in dstgroup.split('/'):
if nodeName == '':
continue
# First try if possible intermediate groups does already exist.
try:
group2 = dstfileh.getNode(group, nodeName)
except NoSuchNodeError:
# The group does not exist. Create it.
group2 = dstfileh.createGroup(group, nodeName,
title=title,
filters=filters)
group = group2
return group
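# e.g. a dstgroup of '/results/2009/run1' creates the intermediate groups
# /results and /results/2009 as needed before returning the final group.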
def recreateIndexes(table, dstfileh, dsttable):
listoldindexes = table._listoldindexes
if listoldindexes != []:
if not regoldindexes:
if verbose:
print "[I]Not regenerating indexes for table: '%s:%s'" % \
(dstfileh.filename, dsttable._v_pathname)
return
# Now, recreate the indexed columns
if verbose:
print "[I]Regenerating indexes for table: '%s:%s'" % \
(dstfileh.filename, dsttable._v_pathname)
for colname in listoldindexes:
if verbose:
print "[I]Indexing column: '%s'. Please wait..." % colname
colobj = dsttable.cols._f_col(colname)
# We don't specify the filters for the indexes
colobj.createIndex(filters = None)
def copyLeaf(srcfile, dstfile, srcnode, dstnode, title,
filters, copyuserattrs, overwritefile, overwrtnodes, stats,
start, stop, step, chunkshape, sortby, checkCSI,
propindexes, upgradeflavors):
# Open the source file
srcfileh = openFile(srcfile, 'r')
# Get the source node (that should exist)
srcNode = srcfileh.getNode(srcnode)
# Get the destination node and its parent
last_slash = dstnode.rindex('/')
if last_slash == len(dstnode)-1:
# print "Detected a trailing slash in destination node. Interpreting it as a destination group."
dstgroup = dstnode[:-1]
elif last_slash > 0:
dstgroup = dstnode[:last_slash]
else:
dstgroup = "/"
dstleaf = dstnode[last_slash+1:]
if dstleaf == "":
dstleaf = srcNode.name
# Check whether the destination group exists or not
if os.path.isfile(dstfile) and not overwritefile:
dstfileh = openFile(dstfile, 'a', PYTABLES_SYS_ATTRS=createsysattrs)
try:
dstGroup = dstfileh.getNode(dstgroup)
except:
# The dstgroup does not seem to exist. Try creating it.
dstGroup = newdstGroup(dstfileh, dstgroup, title, filters)
else:
# The node exists, but it is really a group?
if not isinstance(dstGroup, Group):
# No. Should we overwrite it?
if overwrtnodes:
parent = dstGroup._v_parent
last_slash = dstGroup._v_pathname.rindex('/')
dstgroupname = dstGroup._v_pathname[last_slash+1:]
dstGroup.remove()
dstGroup = dstfileh.createGroup(parent, dstgroupname,
title=title,
filters=filters)
else:
raise RuntimeError, "Please check that the node names are not duplicated in destination, and if so, add the --overwrite-nodes flag if desired."
else:
# The destination file does not exist or will be overwritten.
dstfileh = openFile(dstfile, 'w', title=title, filters=filters,
PYTABLES_SYS_ATTRS=createsysattrs)
dstGroup = newdstGroup(dstfileh, dstgroup, title="", filters=filters)
# Finally, copy srcNode to dstNode
try:
dstNode = srcNode.copy(
dstGroup, dstleaf, filters = filters,
copyuserattrs = copyuserattrs, overwrite = overwrtnodes,
stats = stats, start = start, stop = stop, step = step,
chunkshape = chunkshape,
sortby = sortby, checkCSI = checkCSI, propindexes = propindexes)
except:
(type, value, traceback) = sys.exc_info()
print "Problems doing the copy from '%s:%s' to '%s:%s'" % \
(srcfile, srcnode, dstfile, dstnode)
print "The error was --> %s: %s" % (type, value)
print "The destination file looks like:\n", dstfileh
# Close all the open files:
srcfileh.close()
dstfileh.close()
raise RuntimeError, "Please check that the node names are not duplicated in destination, and if so, add the --overwrite-nodes flag if desired."
# Upgrade flavors in dstNode, if required
if upgradeflavors and srcfileh.format_version.startswith("1"):
# Remove original flavor in case the source file has 1.x format
dstNode.delAttr('FLAVOR')
# Recreate possible old indexes in destination node
if srcNode._c_classId == "TABLE":
recreateIndexes(srcNode, dstfileh, dstNode)
# Close all the open files:
srcfileh.close()
dstfileh.close()
def copyChildren(srcfile, dstfile, srcgroup, dstgroup, title,
recursive, filters, copyuserattrs, overwritefile,
overwrtnodes, stats, start, stop, step,
chunkshape, sortby, checkCSI, propindexes,
upgradeflavors):
"Copy the children from source group to destination group"
# Open the source file with srcgroup as rootUEP
srcfileh = openFile(srcfile, 'r', rootUEP=srcgroup)
# Assign the root to srcGroup
srcGroup = srcfileh.root
created_dstGroup = False
# Check whether the destination group exists or not
if os.path.isfile(dstfile) and not overwritefile:
dstfileh = openFile(dstfile, 'a', PYTABLES_SYS_ATTRS=createsysattrs)
try:
dstGroup = dstfileh.getNode(dstgroup)
except:
# The dstgroup does not seem to exist. Try creating it.
dstGroup = newdstGroup(dstfileh, dstgroup, title, filters)
created_dstGroup = True
else:
# The node exists, but it is really a group?
if not isinstance(dstGroup, Group):
# No. Should we overwrite it?
if overwrtnodes:
parent = dstGroup._v_parent
last_slash = dstGroup._v_pathname.rindex('/')
dstgroupname = dstGroup._v_pathname[last_slash+1:]
dstGroup.remove()
dstGroup = dstfileh.createGroup(parent, dstgroupname,
title=title,
filters=filters)
else:
raise RuntimeError, "Please check that the node names are not duplicated in destination, and if so, add the --overwrite-nodes flag if desired."
else:
# The destination file does not exist or will be overwritten.
dstfileh = openFile(dstfile, 'w', title=title, filters=filters,
PYTABLES_SYS_ATTRS=createsysattrs)
dstGroup = newdstGroup(dstfileh, dstgroup, title="", filters=filters)
created_dstGroup = True
# Copy the attributes to dstGroup, if needed
if created_dstGroup and copyuserattrs:
srcGroup._v_attrs._f_copy(dstGroup)
# Finally, copy srcGroup children to dstGroup
try:
srcGroup._f_copyChildren(
dstGroup, recursive = recursive, filters = filters,
copyuserattrs = copyuserattrs, overwrite = overwrtnodes,
stats = stats, start = start, stop = stop, step = step,
chunkshape = chunkshape,
sortby = sortby, checkCSI = checkCSI, propindexes = propindexes)
except:
(type, value, traceback) = sys.exc_info()
print "Problems doing the copy from '%s:%s' to '%s:%s'" % \
(srcfile, srcgroup, dstfile, dstgroup)
print "The error was --> %s: %s" % (type, value)
print "The destination file looks like:\n", dstfileh
# Close all the open files:
srcfileh.close()
dstfileh.close()
raise RuntimeError, "Please check that the node names are not duplicated in destination, and if so, add the --overwrite-nodes flag if desired. In particular, pay attention that rootUEP is not fooling you."
# Upgrade flavors in dstNode, if required
if upgradeflavors and srcfileh.format_version.startswith("1"):
for dstNode in dstGroup._f_walkNodes("Leaf"):
# Remove original flavor in case the source file has 1.x format
dstNode.delAttr('FLAVOR')
# Convert the remaining tables with old indexes (if any)
for table in srcGroup._f_walkNodes("Table"):
dsttable = dstfileh.getNode(dstGroup, table._v_pathname)
recreateIndexes(table, dstfileh, dsttable)
# Close all the open files:
srcfileh.close()
dstfileh.close()
def main():
global verbose
global regoldindexes
global createsysattrs
usage = """usage: %s [-h] [-v] [-o] [-R start,stop,step] [--non-recursive] [--dest-title=title] [--dont-create-sysattrs] [--dont-copy-userattrs] [--overwrite-nodes] [--complevel=(0-9)] [--complib=lib] [--shuffle=(0|1)] [--fletcher32=(0|1)] [--keep-source-filters] [--chunkshape=value] [--upgrade-flavors] [--dont-regenerate-old-indexes] [--sortby=column] [--checkCSI] [--propindexes] sourcefile:sourcegroup destfile:destgroup
-h -- Print usage message.
-v -- Show more information.
-o -- Overwrite destination file.
-R RANGE -- Select a RANGE of rows (in the form "start,stop,step")
during the copy of *all* the leaves. Default values are
"None,None,1", which means a copy of all the rows.
--non-recursive -- Do not do a recursive copy. Default is to do it.
--dest-title=title -- Title for the new file (if not specified,
the source is copied).
--dont-create-sysattrs -- Do not create sys attrs (default is to do it).
--dont-copy-userattrs -- Do not copy the user attrs (default is to do it).
--overwrite-nodes -- Overwrite destination nodes if they exist. Default is
to not overwrite them.
--complevel=(0-9) -- Set a compression level (0 for no compression, which
is the default).
--complib=lib -- Set the compression library to be used during the copy.
lib can be set to "zlib", "lzo", "bzip2" or "blosc". Defaults to
"zlib".
--shuffle=(0|1) -- Activate or not the shuffling filter (default is active
if complevel>0).
--fletcher32=(0|1) -- Whether to activate or not the fletcher32 filter
(not active by default).
--keep-source-filters -- Use the original filters in source files. The
default is not doing that if any of --complevel, --complib, --shuffle
or --fletcher32 option is specified.
--chunkshape=("keep"|"auto"|int|tuple) -- Set a chunkshape. A value
of "auto" computes a sensible value for the chunkshape of the
leaves copied. The default is to "keep" the original value.
--upgrade-flavors -- When repacking PyTables 1.x files, the flavor of
        leaves will be unset. With this, such leaves will be serialized
as objects with the internal flavor ('numpy' for 2.x series).
--dont-regenerate-old-indexes -- Disable regenerating old indexes. The
default is to regenerate old indexes as they are found.
--sortby=column -- Do a table copy sorted by the index in "column".
For reversing the order, use a negative value in the "step" part of
"RANGE" (see "-R" flag). Only applies to table objects.
--checkCSI -- Force the check for a CSI index for the --sortby column.
--propindexes -- Propagate the indexes existing in original tables. The
default is to not propagate them. Only applies to table objects.
\n""" % os.path.basename(sys.argv[0])
try:
opts, pargs = getopt.getopt(sys.argv[1:], 'hvoR:',
['non-recursive',
'dest-title=',
'dont-create-sysattrs',
'dont-copy-userattrs',
'overwrite-nodes',
'complevel=',
'complib=',
'shuffle=',
'fletcher32=',
'keep-source-filters',
'chunkshape=',
'upgrade-flavors',
'dont-regenerate-old-indexes',
'sortby=',
'checkCSI',
'propindexes',
])
except:
(type, value, traceback) = sys.exc_info()
print "Error parsing the options. The error was:", value
sys.stderr.write(usage)
sys.exit(0)
# default options
overwritefile = False
keepfilters = False
chunkshape = "keep"
complevel = None
complib = None
shuffle = None
fletcher32 = None
title = ""
copyuserattrs = True
rng = None
recursive = True
overwrtnodes = False
upgradeflavors = False
sortby = None
checkCSI = False
propindexes = False
# Get the options
for option in opts:
if option[0] == '-h':
sys.stderr.write(usage)
sys.exit(0)
elif option[0] == '-v':
verbose = True
elif option[0] == '-o':
overwritefile = True
elif option[0] == '-R':
try:
rng = eval("slice("+option[1]+")")
except:
print "Error when getting the range parameter."
(type, value, traceback) = sys.exc_info()
print " The error was:", value
sys.stderr.write(usage)
sys.exit(0)
elif option[0] == '--dest-title':
title = option[1]
elif option[0] == '--dont-create-sysattrs':
createsysattrs = False
elif option[0] == '--dont-copy-userattrs':
copyuserattrs = False
elif option[0] == '--non-recursive':
recursive = False
elif option[0] == '--overwrite-nodes':
overwrtnodes = True
elif option[0] == '--keep-source-filters':
keepfilters = True
elif option[0] == '--chunkshape':
chunkshape = option[1]
if chunkshape.isdigit() or chunkshape.startswith('('):
chunkshape = eval(chunkshape)
elif option[0] == '--upgrade-flavors':
upgradeflavors = True
elif option[0] == '--dont-regenerate-old-indexes':
regoldindexes = False
elif option[0] == '--complevel':
complevel = int(option[1])
elif option[0] == '--complib':
complib = option[1]
elif option[0] == '--shuffle':
shuffle = int(option[1])
elif option[0] == '--fletcher32':
fletcher32 = int(option[1])
elif option[0] == '--sortby':
sortby = option[1]
elif option[0] == '--propindexes':
propindexes = True
elif option[0] == '--checkCSI':
checkCSI = True
else:
print option[0], ": Unrecognized option"
sys.stderr.write(usage)
sys.exit(0)
# if we pass a number of files different from 2, abort
    if len(pargs) != 2:
        print "You need to pass both source and destination!"
sys.stderr.write(usage)
sys.exit(0)
# Catch the files passed as the last arguments
src = pargs[0].split(':')
dst = pargs[1].split(':')
if len(src) == 1:
srcfile, srcnode = src[0], "/"
else:
srcfile, srcnode = src
if len(dst) == 1:
dstfile, dstnode = dst[0], "/"
else:
dstfile, dstnode = dst
if srcnode == "":
# case where filename == "filename:" instead of "filename:/"
srcnode = "/"
if dstnode == "":
# case where filename == "filename:" instead of "filename:/"
dstnode = "/"
    # Ignore the warnings for tables that contain old indexes
# (these will be handled by the copying routines)
warnings.filterwarnings("ignore", category=OldIndexWarning)
# Ignore the flavors warnings during upgrading flavor operations
if upgradeflavors:
warnings.filterwarnings("ignore", category=FlavorWarning)
# Build the Filters instance
if ((complevel, complib, shuffle, fletcher32) == (None,)*4 or keepfilters):
filters = None
else:
if complevel is None: complevel = 0
if shuffle is None:
if complevel > 0:
shuffle = True
else:
shuffle = False
if complib is None: complib = "zlib"
if fletcher32 is None: fletcher32 = False
filters = Filters(complevel=complevel, complib=complib,
shuffle=shuffle, fletcher32=fletcher32)
# The start, stop and step params:
start, stop, step = None, None, 1 # Defaults
if rng:
start, stop, step = rng.start, rng.stop, rng.step
# Some timing
t1 = time.time()
cpu1 = time.clock()
# Copy the file
if verbose:
print "+=+"*20
print "Recursive copy:", recursive
print "Applying filters:", filters
if sortby is not None:
print "Sorting table(s) by column:", sortby
print "Forcing a CSI creation:", checkCSI
if propindexes:
print "Recreating indexes in copied table(s)"
print "Start copying %s:%s to %s:%s" % (srcfile, srcnode,
dstfile, dstnode)
print "+=+"*20
# Check whether the specified source node is a group or a leaf
h5srcfile = openFile(srcfile, 'r')
srcnodeobject = h5srcfile.getNode(srcnode)
objectclass = srcnodeobject.__class__.__name__
# Close the file again
h5srcfile.close()
stats = {'groups': 0, 'leaves': 0, 'links': 0, 'bytes': 0}
if isinstance(srcnodeobject, Group):
copyChildren(
srcfile, dstfile, srcnode, dstnode,
title = title, recursive = recursive, filters = filters,
copyuserattrs = copyuserattrs, overwritefile = overwritefile,
overwrtnodes = overwrtnodes, stats = stats,
start = start, stop = stop, step = step, chunkshape = chunkshape,
sortby = sortby, checkCSI = checkCSI, propindexes = propindexes,
upgradeflavors=upgradeflavors)
else:
# If not a Group, it should be a Leaf
copyLeaf(
srcfile, dstfile, srcnode, dstnode,
title = title, filters = filters, copyuserattrs = copyuserattrs,
overwritefile = overwritefile, overwrtnodes = overwrtnodes,
stats = stats, start = start, stop = stop, step = step,
chunkshape = chunkshape,
sortby = sortby, checkCSI = checkCSI, propindexes = propindexes,
upgradeflavors=upgradeflavors)
# Gather some statistics
t2 = time.time()
cpu2 = time.clock()
tcopy = round(t2-t1, 3)
cpucopy = round(cpu2-cpu1, 3)
tpercent = int(round(cpucopy/tcopy, 2)*100)
if verbose:
ngroups = stats['groups']
nleaves = stats['leaves']
nlinks = stats['links']
nbytescopied = stats['bytes']
nnodes = ngroups + nleaves + nlinks
print \
"Groups copied:", ngroups, \
" Leaves copied:", nleaves, \
" Links copied:", nlinks
if copyuserattrs:
print "User attrs copied"
else:
print "User attrs not copied"
print "KBytes copied:", round(nbytescopied/1024.,3)
print "Time copying: %s s (real) %s s (cpu) %s%%" % \
(tcopy, cpucopy, tpercent)
print "Copied nodes/sec: ", round((nnodes) / float(tcopy),1)
print "Copied KB/s :", int(nbytescopied / (tcopy * 1024))
```
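A minimal sketch of the filter-defaulting rule applied in `main()` above: when no compression option is given (or `--keep-source-filters` is passed) the source filters are reused, otherwise unset options fall back to no compression, the "zlib" library, shuffling only when actually compressing, and no fletcher32 checksums. The helper name `build_filters` is introduced here purely for illustration and is not part of the utility.
```python
# Sketch of the filter-defaulting logic in main(); assumes PyTables is importable.
from tables import Filters

def build_filters(complevel=None, complib=None, shuffle=None, fletcher32=None,
                  keepfilters=False):
    # All options unset (or keeping source filters) -> reuse the source filters.
    if (complevel, complib, shuffle, fletcher32) == (None,) * 4 or keepfilters:
        return None
    if complevel is None:
        complevel = 0
    if shuffle is None:
        shuffle = complevel > 0      # shuffling only helps when compressing
    if complib is None:
        complib = "zlib"
    if fletcher32 is None:
        fletcher32 = False
    return Filters(complevel=complevel, complib=complib,
                   shuffle=shuffle, fletcher32=fletcher32)

print(build_filters())               # None -> keep whatever the source file uses
print(build_filters(complevel=5))    # zlib level 5 with shuffling enabled
```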
#### File: tables/tests/test_numarray.py
```python
import sys
import unittest
import os
import tempfile
from numarray import strings
from numarray import records
from numarray import *
import tables
from tables import *
from tables import nra
from tables.tests import common
from tables.tests.common import allequal
# To delete the internal attributes automagically
unittest.TestCase.tearDown = common.cleanup
types = ['Int8', 'Int16', 'Int32', 'Int64', 'Float32', 'Float64']
types += ['UInt8', 'UInt16', 'UInt32', 'Complex32', 'Complex64']
# UInt64 checking disabled on win platforms
# because this type is not supported
if sys.platform != 'win32':
types += ['UInt64']
types += ['Bool']
class BasicTestCase(unittest.TestCase):
"""Basic test for all the supported types present in numarray.
    All of them are included in PyTables.
"""
endiancheck = 0
def WriteRead(self, testArray):
if common.verbose:
print '\n', '-=' * 30
if type(testArray) == NumArray:
type_ = testArray.type()
else:
type_ = "String"
print "Running test for array with type '%s'" % \
type_,
print "for class check:", self.title
# Create an instance of HDF5 Table
self.file = tempfile.mktemp(".h5")
self.fileh = openFile(self.file, mode = "w")
self.root = self.fileh.root
# Create the array under root and name 'somearray'
a = testArray
self.fileh.createArray(self.root, 'somearray', a, "Some array")
# Close the file
self.fileh.close()
# Re-open the file in read-only mode
self.fileh = openFile(self.file, mode = "r")
self.root = self.fileh.root
# Read the saved array
b = self.root.somearray.read()
        # For cases where read() returns a Python type instead of a numarray type
if not hasattr(b, "shape"):
b = array(b, type=a.type())
# Compare them. They should be equal.
#if not allequal(a,b, "numarray") and common.verbose:
if common.verbose and type(a) == NumArray:
print "Array written:", a
print "Array written shape:", a.shape
print "Array written itemsize:", a.itemsize
print "Array written type:", a.type()
print "Array read:", b
print "Array read shape:", b.shape
print "Array read itemsize:", b.itemsize
print "Array read type:", b.type()
type_ = self.root.somearray.atom.type
# Check strictly the array equality
self.assertEqual(type(a), type(b))
self.assertEqual(a.shape, b.shape)
self.assertEqual(a.shape, self.root.somearray.shape)
if type(a) == strings.CharArray:
self.assertEqual(type_, "string")
else:
self.assertEqual(a.type(), b.type())
if not type_.startswith('complex'):
self.assertEqual(str(a.type()).lower(), type_)
else:
if type_ == 'complex64':
self.assertEqual(str(a.type()), "Complex32")
else:
self.assertEqual(str(a.type()), "Complex64")
self.assertTrue(allequal(a,b, "numarray"))
self.fileh.close()
# Then, delete the file
os.remove(self.file)
return
def test00_char(self):
"Data integrity during recovery (character objects)"
a = strings.array(self.tupleChar)
self.WriteRead(a)
return
def test01_char_nc(self):
"Data integrity during recovery (non-contiguous character objects)"
a = strings.array(self.tupleChar)
if a.shape == ():
b = a # We cannot use the indexing notation
else:
b = a[::2]
# Ensure that this numarray string is non-contiguous
if a.shape[0] > 2:
self.assertEqual(b.iscontiguous(), 0)
self.WriteRead(b)
return
def test02_types(self):
"Data integrity during recovery (numerical types)"
for type_ in types:
if self.tupleInt.shape:
a = self.tupleInt.astype(type_)
else:
# shape is the empty tuple ()
a = array(self.tupleInt, type=type_)
self.WriteRead(a)
return
def test03_types_nc(self):
"Data integrity during recovery (non-contiguous numerical types)"
for type_ in types:
if self.tupleInt.shape:
a = self.tupleInt.astype(type_)
else:
# shape is the empty tuple ()
a = array(self.tupleInt, dtype=type_)
# This should not be tested for the rank-0 case
if len(a.shape) == 0:
return
b = a[::2]
# Ensure that this array is non-contiguous (for non-trivial case)
if a.shape[0] > 2:
self.assertEqual(b.iscontiguous(), 0)
self.WriteRead(b)
return
class Basic0DOneTestCase(BasicTestCase):
# Rank-0 case
title = "Rank-0 case 1"
tupleInt = array(3)
tupleChar = "4"
class Basic0DTwoTestCase(BasicTestCase):
# Rank-0 case
title = "Rank-0 case 2"
tupleInt = array(33)
tupleChar = "44"
class Basic1DOneTestCase(BasicTestCase):
# 1D case
title = "Rank-1 case 1"
tupleInt = array((3,))
tupleChar = ("a",)
class Basic1DTwoTestCase(BasicTestCase):
# 1D case
title = "Rank-1 case 2"
tupleInt = array((0, 4))
tupleChar = ("aaa",)
class Basic1DThreeTestCase(BasicTestCase):
# 1D case
title = "Rank-1 case 3"
tupleInt = array((3, 4, 5))
tupleChar = ("aaaa", "bbb",)
class Basic2DTestCase(BasicTestCase):
# 2D case
title = "Rank-2 case 1"
#tupleInt = reshape(array(arange((4)**2)), (4,)*2)
tupleInt = ones((4,)*2)
tupleChar = [["aaa","ddddd"],["d","ss"],["s","tt"]]
class Basic10DTestCase(BasicTestCase):
# 10D case
title = "Rank-10 case 1"
tupleInt = ones((2,)*10, 'Int32')
# The next tuple consumes far more time, so this
# test should be run in common.heavy mode.
    # Dimensions greater than 6 in numarray strings give some warnings
tupleChar = strings.array("abc"*2**6, shape=(2,)*6, itemsize=3)
class GroupsArrayTestCase(unittest.TestCase):
"""This test class checks combinations of arrays with groups.
    It also uses array ranks ranging up to 10.
"""
def test00_iterativeGroups(self):
"""Checking combinations of arrays with groups
        It also uses array ranks ranging up to 10.
"""
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test00_iterativeGroups..." % \
self.__class__.__name__
# Open a new empty HDF5 file
file = tempfile.mktemp(".h5")
fileh = openFile(file, mode = "w")
# Get the root group
group = fileh.root
i = 1
for type_ in types:
# Create an array of type_, with incrementally bigger ranges
a = ones((2,) * i, type_)
# Save it on the HDF5 file
dsetname = 'array_' + type_
if common.verbose:
print "Creating dataset:", group._g_join(dsetname)
hdfarray = fileh.createArray(group, dsetname, a, "Large array")
# Create a new group
group = fileh.createGroup(group, 'group' + str(i))
# increment the range for next iteration
i += 1
# Close the file
fileh.close()
# Open the previous HDF5 file in read-only mode
fileh = openFile(file, mode = "r")
# Get the root group
group = fileh.root
        # Get the metadata on the previously saved arrays
for i in range(1,len(types)):
# Create an array for later comparison
a = ones((2,) * i, types[i - 1])
# Get the dset object hanging from group
dset = getattr(group, 'array_' + types[i-1])
# Get the actual array
b = dset.read()
if not allequal(a,b, "numarray") and common.verbose:
print "Array a original. Shape: ==>", a.shape
print "Array a original. Data: ==>", a
print "Info from dataset:", dset._v_pathname
print " shape ==>", dset.shape,
print " type ==> %s" % dset.atom.type
print "Array b read from file. Shape: ==>", b.shape,
print ". Type ==> %s" % b.type()
self.assertEqual(a.shape, b.shape)
self.assertTrue(allequal(a,b, "numarray"))
# Iterate over the next group
group = getattr(group, 'group' + str(i))
# Close the file
fileh.close()
# Then, delete the file
os.remove(file)
def test01_largeRankArrays(self):
"""Checking creation of large rank arrays (0 < rank <= 32)
        It also uses array ranks ranging up to maxrank.
"""
        # maximum level of recursion (deepest group level) achieved:
        # maxrank = 32 (for an effective maximum rank of 32)
# This limit is due to a limit in the HDF5 library.
minrank = 1
maxrank = 32
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test01_largeRankArrays..." % \
self.__class__.__name__
print "Maximum rank for tested arrays:", maxrank
# Open a new empty HDF5 file
file = tempfile.mktemp(".h5")
fileh = openFile(file, mode = "w")
group = fileh.root
if common.verbose:
print "Rank array writing progress: ",
for rank in range(minrank, maxrank + 1):
# Create an array of integers, with incrementally bigger ranges
a = ones((1,) * rank, 'Int32')
if common.verbose:
print "%3d," % (rank),
fileh.createArray(group, "array", a, "Rank: %s" % rank)
group = fileh.createGroup(group, 'group' + str(rank))
# Flush the buffers
fileh.flush()
# Close the file
fileh.close()
# Open the previous HDF5 file in read-only mode
fileh = openFile(file, mode = "r")
group = fileh.root
if common.verbose:
print
print "Rank array reading progress: "
        # Get the metadata on the previously saved arrays
for rank in range(minrank, maxrank + 1):
# Create an array for later comparison
a = ones((1,) * rank, 'Int32')
# Get the actual array
b = group.array.read()
if common.verbose:
print "%3d," % (rank),
if not a.tolist() == b.tolist() and common.verbose:
print "Array b read from file. Shape: ==>", b.shape,
print ". Type ==> %c" % b.type()
self.assertEqual(a.shape, b.shape)
self.assertEqual(a.type(), b.type())
self.assertTrue(allequal(a, b, "numarray"))
# Iterate over the next group
group = fileh.getNode(group, 'group' + str(rank))
if common.verbose:
print # This flush the stdout buffer
# Close the file
fileh.close()
# Delete the file
os.remove(file)
# Test Record class
class Record(IsDescription):
var1 = StringCol(itemsize=4, dflt="abcd", pos=0)
var2 = StringCol(itemsize=1, dflt="a", pos=1)
var3 = BoolCol(dflt=1)
var4 = Int8Col(dflt=1)
var5 = UInt8Col(dflt=1)
var6 = Int16Col(dflt=1)
var7 = UInt16Col(dflt=1)
var8 = Int32Col(dflt=1)
var9 = UInt32Col(dflt=1)
var10 = Int64Col(dflt=1)
var11 = Float32Col(dflt=1.0)
var12 = Float64Col(dflt=1.0)
var13 = ComplexCol(dflt=(1.+0.j), itemsize=8)
var14 = ComplexCol(dflt=(1.+0.j), itemsize=16)
class TableReadTestCase(common.PyTablesTestCase):
nrows = 100
def setUp(self):
# Create an instance of an HDF5 Table
self.file = tempfile.mktemp(".h5")
fileh = openFile(self.file, "w")
table = fileh.createTable(fileh.root, 'table', Record)
for i in range(self.nrows):
table.row.append() # Fill 100 rows with default values
fileh.close()
self.fileh = openFile(self.file, "a") # allow flavor changes
def tearDown(self):
self.fileh.close()
os.remove(self.file)
common.cleanup(self)
def test01_readTableChar(self):
"""Checking column conversion into numarray in read(). Chars."""
table = self.fileh.root.table
table.flavor = "numarray"
for colname in table.colnames:
numcol = table.read(field=colname)
typecol = table.coltypes[colname]
itemsizecol = table.description._v_dtypes[colname].base.itemsize
if typecol == "string":
if itemsizecol > 1:
orignumcol = strings.array(['abcd']*self.nrows, itemsize=4)
else:
orignumcol = strings.array(['a']*self.nrows, itemsize=1)
if common.verbose:
print "Itemsize of column:", itemsizecol
print "Shape of numarray column read:", numcol.shape
print "Should look like:", orignumcol.shape
print "First 3 elements of read col:", numcol[:3]
# Check that both numarray objects are equal
self.assertTrue(allequal(numcol, orignumcol, "numarray"))
def test01_readTableNum(self):
"""Checking column conversion into numarray in read(). Numerical."""
table = self.fileh.root.table
table.flavor="numarray"
for colname in table.colnames:
numcol = table.read(field=colname)
typecol = table.coltypes[colname]
if typecol != "string":
type_ = numcol.type()
if common.verbose:
print "Type of numarray column read:", type_
print "Should look like:", typecol
orignumcol = ones(shape=self.nrows, dtype=numcol.type())
# Check that both numarray objects are equal
self.assertTrue(allequal(numcol, orignumcol, "numarray"))
def test02_readCoordsChar(self):
"""Column conversion into numarray in readCoordinates(). Chars."""
table = self.fileh.root.table
table.flavor = "numarray"
coords = (1,2,3)
self.nrows = len(coords)
for colname in table.colnames:
numcol = table.readCoordinates(coords, field=colname)
typecol = table.coltypes[colname]
itemsizecol = table.description._v_dtypes[colname].base.itemsize
if typecol == "string":
if itemsizecol > 1:
orignumcol = strings.array(['abcd']*self.nrows, itemsize=4)
else:
orignumcol = strings.array(['a']*self.nrows, itemsize=1)
if common.verbose:
print "Itemsize of column:", itemsizecol
print "Shape of numarray column read:", numcol.shape
print "Should look like:", orignumcol.shape
print "First 3 elements of read col:", numcol[:3]
# Check that both numarray objects are equal
self.assertTrue(allequal(numcol, orignumcol, "numarray"))
def test02_readCoordsNum(self):
"""Column conversion into numarray in readCoordinates(). Numerical."""
table = self.fileh.root.table
table.flavor="numarray"
coords = (1,2,3)
self.nrows = len(coords)
for colname in table.colnames:
numcol = table.readCoordinates(coords, field=colname)
typecol = table.coltypes[colname]
if typecol != "string":
type_ = numcol.type()
if typecol == "int64":
return
if common.verbose:
print "Type of read numarray column:", type_
print "Should look like:", typecol
orignumcol = ones(shape=self.nrows, type=numcol.type())
# Check that both numarray objects are equal
self.assertTrue(allequal(numcol, orignumcol, "numarray"))
def test03_getIndexnumarray(self):
"""Getting table rows specifyied as numarray scalar integers."""
table = self.fileh.root.table
coords = array([1,2,3], type='Int8')
for colname in table.colnames:
numcol = [ table[coord][colname].item() for coord in coords ]
typecol = table.coltypes[colname]
if typecol != "string":
if typecol == "bool": # Special case for boolean translation
typecol = "Bool"
numcol = array(numcol, dtype=typecol)
if common.verbose:
type_ = numcol.type()
print "Type of read numarray column:", type_
print "Should look like:", typecol
orignumcol = ones(shape=len(numcol), type=numcol.type())
# Check that both numarray objects are equal
self.assertTrue(allequal(numcol, orignumcol, "numarray"))
def test04_setIndexnumarray(self):
"""Setting table rows specifyied as numarray integers."""
self.fileh.close()
self.fileh = openFile(self.file, "a")
table = self.fileh.root.table
table.flavor = "numarray"
coords = array([1,2,3], dtype='int8')
# Modify row 1
# From PyTables 2.0 on, assignments to records can be done
# only as tuples (see http://projects.scipy.org/scipy/numarray/ticket/315)
#table[coords[0]] = ["aasa","x"]+[232]*12
table[coords[0]] = tuple(["aasa","x"]+[232]*12)
#record = list(table[coords[0]])
record = table.read(coords[0])[0]
if common.verbose:
print """Original row:
['aasa', 'x', 232, -24, 232, 232, 1, 232L, 232, (232+0j), 232.0, 232L, (232+0j), 232.0]
"""
print "Read row:\n", record
self.assertEqual(record.field('var1'), 'aasa')
self.assertEqual(record.field('var2'), 'x')
self.assertEqual(record.field('var3'), True)
self.assertEqual(record.field('var4'), -24)
self.assertEqual(record.field('var7'), 232)
# The declaration of the nested table:
class Info(IsDescription):
_v_pos = 3
Name = StringCol(itemsize=2)
Value = ComplexCol(itemsize=16)
class TestTDescr(IsDescription):
"""A description that has several nested columns."""
x = Int32Col(dflt=0, shape=2, pos=0) #0
y = Float64Col(dflt=1, shape=(2,2))
z = UInt8Col(dflt=1)
z3 = EnumCol({'r':4, 'g':2, 'b':1}, dflt='r', base='int32', shape=2)
color = StringCol(itemsize=4, dflt="ab", pos=2)
info = Info()
class Info(IsDescription): #1
_v_pos = 1
name = StringCol(itemsize=2)
value = ComplexCol(itemsize=16, pos=0) #0
y2 = Float64Col(pos=1) #1
z2 = UInt8Col()
class Info2(IsDescription):
y3 = Time64Col(shape=2)
name = StringCol(itemsize=2)
value = ComplexCol(itemsize=16, shape=2)
class TableNativeFlavorTestCase(common.PyTablesTestCase):
nrows = 100
dtype = [('value', 'c16'),
('y2', 'f8'),
('Info2',
[('name', 'a2'),
('value', '(2,)c16'),
('y3', '(2,)f8')]),
('name', 'a2'),
('z2', 'u1')]
_infozeros = nra.array(descr=dtype, shape=3)
# Set the contents to zero (or empty strings)
_infozeros.field('value')[:] = 0
_infozeros.field('y2')[:] = 0
_infozeros.field('Info2/name')[:] = "\0"
_infozeros.field('Info2/value')[:] = 0
_infozeros.field('Info2/y3')[:] = 0
_infozeros.field('name')[:] = "\0"
_infozeros.field('z2')[:] = 0
_infoones = nra.array(descr=dtype, shape=3)
# Set the contents to one (or blank strings)
_infoones.field('value')[:] = 1
_infoones.field('y2')[:] = 1
_infoones.field('Info2/name')[:] = " "
_infoones.field('Info2/value')[:] = 1
_infoones.field('Info2/y3')[:] = 1
_infoones.field('name')[:] = " "
_infoones.field('z2')[:] = 1
def setUp(self):
# Create an instance of an HDF5 Table
self.file = tempfile.mktemp(".h5")
fileh = openFile(self.file, "w")
table = fileh.createTable(fileh.root, 'table', TestTDescr,
expectedrows=self.nrows)
table.flavor = 'numarray'
for i in range(self.nrows):
table.row.append() # Fill 100 rows with default values
table.flush()
self.fileh = fileh
def tearDown(self):
self.fileh.close()
os.remove(self.file)
common.cleanup(self)
def test01a_basicTableRead(self):
"""Checking the return of a numarray in read()."""
if self.close:
self.fileh.close()
self.fileh = openFile(self.file, "a")
table = self.fileh.root.table
data = table[:]
if common.verbose:
print "Type of read:", type(data)
print "Formats of the record:", data._formats
print "First 3 elements of read:", data[:3]
# Check the type of the recarray
self.assertTrue(isinstance(data, records.RecArray))
# Check the value of some columns
# A flat column
col = table.cols.x[:3]
self.assertTrue(isinstance(col, NumArray))
npcol = zeros((3,2), type="Int32")
if common.verbose:
print "Plain column:"
print "read column-->", col
print "should look like-->", npcol
self.assertTrue(allequal(col, npcol, "numarray"))
# A nested column
col = table.cols.Info[:3]
self.assertTrue(isinstance(col, records.RecArray))
npcol = self._infozeros
if common.verbose:
print "Nested column:"
print "read column-->", col
print "should look like-->", npcol
self.assertEqual(col.descr, npcol.descr)
self.assertEqual(str(col), str(npcol))
def test01b_basicTableRead(self):
"""Checking the return of a numarray in read() (strided version)."""
if self.close:
self.fileh.close()
self.fileh = openFile(self.file, "a")
table = self.fileh.root.table
data = table[::3]
if common.verbose:
print "Type of read:", type(data)
print "Description of the record:", data.descr
print "First 3 elements of read:", data[:3]
# Check that both numarray objects are equal
self.assertTrue(isinstance(data, records.RecArray))
# Check the value of some columns
# A flat column
col = table.cols.x[:9:3]
self.assertTrue(isinstance(col, NumArray))
npcol = zeros((3,2), dtype="Int32")
if common.verbose:
print "Plain column:"
print "read column-->", col
print "should look like-->", npcol
self.assertTrue(allequal(col, npcol, "numarray"))
# A nested column
col = table.cols.Info[:9:3]
self.assertTrue(isinstance(col, records.RecArray))
npcol = self._infozeros
if common.verbose:
print "Nested column:"
print "read column-->", col
print "should look like-->", npcol
self.assertEqual(col.descr, npcol.descr)
self.assertEqual(str(col), str(npcol))
def test02_getWhereList(self):
"""Checking the return of numarray in getWhereList method."""
if self.close:
self.fileh.close()
self.fileh = openFile(self.file, "a")
table = self.fileh.root.table
data = table.getWhereList('z == 1')
if common.verbose:
print "Type of read:", type(data)
print "First 3 elements of read:", data[:3]
# Check that both numarray objects are equal
self.assertTrue(isinstance(data, NumArray))
# Check that all columns have been selected
self.assertEqual(len(data), 100)
# Finally, check that the contents are ok
self.assertTrue(allequal(data, arange(100, type="Int64"), "numarray"))
def test03a_readWhere(self):
"""Checking the return of numarray in readWhere method (strings)."""
table = self.fileh.root.table
table.cols.color.createIndex()
if self.close:
self.fileh.close()
self.fileh = openFile(self.file, "a")
table = self.fileh.root.table
data = table.readWhere('color == "ab"')
if common.verbose:
print "Type of read:", type(data)
print "Length of the data read:", len(data)
# Check that both numarray objects are equal
self.assertTrue(isinstance(data, records.RecArray))
# Check that all columns have been selected
self.assertEqual(len(data), self.nrows)
def test03b_readWhere(self):
"""Checking the return of numarray in readWhere method (numeric)."""
table = self.fileh.root.table
table.cols.z.createIndex()
if self.close:
self.fileh.close()
self.fileh = openFile(self.file, "a")
table = self.fileh.root.table
data = table.readWhere('z == 0')
if common.verbose:
print "Type of read:", type(data)
print "Length of the data read:", len(data)
# Check that both numarray objects are equal
self.assertTrue(isinstance(data, records.RecArray))
# Check that all columns have been selected
self.assertEqual(len(data), 0)
def test04a_createTable(self):
"""Checking the Table creation from a numarray recarray."""
npdata = self._infozeros
table = self.fileh.createTable(self.fileh.root, 'table2', npdata)
if self.close:
self.fileh.close()
self.fileh = openFile(self.file, "a")
table = self.fileh.root.table2
data = table[:]
if common.verbose:
print "Type of read:", type(data)
print "Description of the record:", data.descr
print "First 3 elements of read:", data[:3]
print "Length of the data read:", len(data)
if common.verbose:
print "npdata-->", npdata
print "data-->", data
# Check that both numarray objects are equal
self.assertTrue(isinstance(data, records.RecArray))
# Check the type
self.assertEqual(data.descr, npdata.descr)
self.assertEqual(str(data), str(npdata))
def test04b_appendTable(self):
"""Checking appending a numarray recarray."""
table = self.fileh.root.table
npdata = table[3:6]
table.append(npdata)
if self.close:
self.fileh.close()
self.fileh = openFile(self.file, "a")
table = self.fileh.root.table
data = table[-3:]
if common.verbose:
print "Type of read:", type(data)
print "Description of the record:", data.descr
print "Last 3 elements of read:", data[-3:]
print "Length of the data read:", len(data)
if common.verbose:
print "npdata-->", npdata
print "data-->", data
# Check that both numarray objects are equal
self.assertTrue(isinstance(data, records.RecArray))
# Check the type
self.assertEqual(data.descr, npdata.descr)
self.assertEqual(str(data), str(npdata))
def test05a_assignColumn(self):
"""Checking assigning to a column."""
table = self.fileh.root.table
table.cols.z[:] = ones((100,), dtype='UInt8')
if self.close:
self.fileh.close()
self.fileh = openFile(self.file, "a")
table = self.fileh.root.table
data = table.cols.z[:]
if common.verbose:
print "Type of read:", type(data)
print "First 3 elements of read:", data[:3]
print "Length of the data read:", len(data)
# Check that both numarray objects are equal
self.assertTrue(isinstance(data, NumArray))
# Check that all columns have been selected
self.assertEqual(len(data), 100)
# Finally, check that the contents are ok
self.assertTrue(allequal(data, ones((100,), dtype="UInt8"), "numarray"))
def test05b_modifyingColumns(self):
"""Checking modifying several columns at once."""
table = self.fileh.root.table
xcol = ones((3,2), 'Int32')
ycol = ones((3,2,2), 'Float64')
zcol = zeros((3,), 'UInt8')
table.modifyColumns(3, 6, 1, [xcol, ycol, zcol], ['x', 'y', 'z'])
if self.close:
self.fileh.close()
self.fileh = openFile(self.file, "a")
table = self.fileh.root.table
data = table.cols.y[3:6]
if common.verbose:
print "Type of read:", type(data)
print "First 3 elements of read:", data[:3]
print "Length of the data read:", len(data)
if common.verbose:
print "ycol-->", ycol
print "data-->", data
# Check that both numarray objects are equal
self.assertTrue(isinstance(data, NumArray))
# Check the type
self.assertEqual(data.type(), ycol.type())
self.assertTrue(allequal(data, ycol, "numarray"))
def test05c_modifyingColumns(self):
"""Checking modifying several columns using a numarray buffer."""
table = self.fileh.root.table
dtype=[('x', '(2,)i4'), ('y', '(2,2)f8'), ('z', 'u1')]
nparray = nra.array(shape=(3,), descr=dtype)
nparray.field('x')[:] = 1
nparray.field('y')[:] = 1
nparray.field('z')[:] = 2
table.modifyColumns(3, 6, 1, nparray, ['x', 'y', 'z'])
if self.close:
self.fileh.close()
self.fileh = openFile(self.file, "a")
table = self.fileh.root.table
ycol = ones((3, 2, 2), 'Float64')
data = table.cols.y[3:6]
if common.verbose:
print "Type of read:", type(data)
print "First 3 elements of read:", data[:3]
print "Length of the data read:", len(data)
if common.verbose:
print "ycol-->", ycol
print "data-->", data
# Check that both numarray objects are equal
self.assertTrue(isinstance(data, NumArray))
# Check the type
self.assertEqual(data.type(), ycol.type())
self.assertEqual(str(data), str(ycol))
def test06a_assignNestedColumn(self):
"""Checking assigning a nested column (using modifyColumn)."""
npdata = self._infoones
table = self.fileh.root.table
data = table.cols.Info[3:6]
table.modifyColumn(3, 6, 1, column=npdata, colname='Info')
if self.close:
self.fileh.close()
self.fileh = openFile(self.file, "a")
table = self.fileh.root.table
data = table.cols.Info[3:6]
if common.verbose:
print "Type of read:", type(data)
print "Description of the record:", data.descr
print "First 3 elements of read:", data[:3]
print "Length of the data read:", len(data)
if common.verbose:
print "npdata-->", npdata
print "data-->", data
# Check that both numarray objects are equal
self.assertTrue(isinstance(data, records.RecArray))
# Check the type
self.assertEqual(data.descr, npdata.descr)
self.assertEqual(str(data), str(npdata))
def test06b_assignNestedColumn(self):
"""Checking assigning a nested column (using the .cols accessor)."""
table = self.fileh.root.table
npdata = self._infoones
table.cols.Info[3:6] = npdata
if self.close:
self.fileh.close()
self.fileh = openFile(self.file, "a")
table = self.fileh.root.table
data = table.cols.Info[3:6]
if common.verbose:
print "Type of read:", type(data)
print "Description of the record:", data.descr
print "First 3 elements of read:", data[:3]
print "Length of the data read:", len(data)
if common.verbose:
print "npdata-->", npdata
print "data-->", data
# Check that both numarray objects are equal
self.assertTrue(isinstance(data, records.RecArray))
# Check the type
self.assertEqual(data.descr, npdata.descr)
self.assertEqual(str(data), str(npdata))
def test07a_modifyingRows(self):
"""Checking modifying several rows at once (using modifyRows)."""
table = self.fileh.root.table
# Read a chunk of the table
chunk = table[0:3]
# Modify it somewhat
chunk.field('y')[:] = -1
table.modifyRows(3, 6, 1, rows=chunk)
if self.close:
self.fileh.close()
self.fileh = openFile(self.file, "a")
table = self.fileh.root.table
ycol = zeros((3,2,2), 'Float64')-1
data = table.cols.y[3:6]
if common.verbose:
print "Type of read:", type(data)
print "First 3 elements of read:", data[:3]
print "Length of the data read:", len(data)
if common.verbose:
print "ycol-->", ycol
print "data-->", data
# Check that both numarray objects are equal
self.assertTrue(isinstance(data, NumArray))
# Check the type
self.assertEqual(data.type(), ycol.type())
self.assertTrue(allequal(ycol, data, "numarray"))
def test07b_modifyingRows(self):
"""Checking modifying several rows at once (using cols accessor)."""
table = self.fileh.root.table
# Read a chunk of the table
chunk = table[0:3]
# Modify it somewhat
chunk.field('y')[:] = -1
table.cols[3:6] = chunk
if self.close:
self.fileh.close()
self.fileh = openFile(self.file, "a")
table = self.fileh.root.table
# Check that some column has been actually modified
ycol = zeros((3,2,2), 'Float64')-1
data = table.cols.y[3:6]
if common.verbose:
print "Type of read:", type(data)
print "First 3 elements of read:", data[:3]
print "Length of the data read:", len(data)
if common.verbose:
print "ycol-->", ycol
print "data-->", data
# Check that both numarray objects are equal
self.assertTrue(isinstance(data, NumArray))
# Check the type
self.assertEqual(data.type(), ycol.type())
self.assertTrue(allequal(ycol, data, "numarray"))
def test08a_modifyingRows(self):
"""Checking modifying just one row at once (using modifyRows)."""
table = self.fileh.root.table
# Read a chunk of the table
chunk = table[3]
# Modify it somewhat
chunk.field('y')[:] = -1
table.modifyRows(6, 7, 1, chunk)
if self.close:
self.fileh.close()
self.fileh = openFile(self.file, "a")
table = self.fileh.root.table
# Check that some column has been actually modified
ycol = zeros((2,2), 'Float64')-1
data = table.cols.y[6]
if common.verbose:
print "Type of read:", type(data)
print "First 3 elements of read:", data[:3]
print "Length of the data read:", len(data)
if common.verbose:
print "ycol-->", ycol
print "data-->", data
# Check that both numarray objects are equal
self.assertTrue(isinstance(data, NumArray))
# Check the type
self.assertEqual(data.type(), ycol.type())
self.assertTrue(allequal(ycol, data, "numarray"))
def test08b_modifyingRows(self):
"""Checking modifying just one row at once (using cols accessor)."""
table = self.fileh.root.table
# Read a chunk of the table
chunk = table[3]
# Modify it somewhat
chunk['y'][:] = -1
table.cols[6] = chunk
if self.close:
self.fileh.close()
self.fileh = openFile(self.file, "a")
table = self.fileh.root.table
# Check that some column has been actually modified
ycol = zeros((2,2), 'Float64')-1
data = table.cols.y[6]
if common.verbose:
print "Type of read:", type(data)
print "First 3 elements of read:", data[:3]
print "Length of the data read:", len(data)
if common.verbose:
print "ycol-->", ycol
print "data-->", data
# Check that both numarray objects are equal
self.assertTrue(isinstance(data, NumArray))
# Check the type
self.assertEqual(data.type(), ycol.type())
self.assertTrue(allequal(ycol, data, "numarray"))
def test09a_getStrings(self):
"""Checking the return of string columns with spaces."""
if self.close:
self.fileh.close()
self.fileh = openFile(self.file, "a")
table = self.fileh.root.table
rdata = table.getWhereList('color == "ab"')
data = table.readCoordinates(rdata)
if common.verbose:
print "Type of read:", type(data)
print "Description of the record:", data.descr
print "First 3 elements of read:", data[:3]
# Check that both numarray objects are equal
self.assertTrue(isinstance(data, records.RecArray))
# Check that all columns have been selected
self.assertEqual(len(data), 100)
# Finally, check that the contents are ok
for idata in data.field('color'):
self.assertEqual(idata, "ab")
def test09b_getStrings(self):
"""Checking the return of string columns with spaces. (modify)"""
if self.close:
self.fileh.close()
self.fileh = openFile(self.file, "a")
table = self.fileh.root.table
for i in range(50):
table.cols.color[i] = "a "
table.flush()
data = table[:]
if common.verbose:
print "Type of read:", type(data)
print "Description of the record:", data.descr
print "First 3 elements of read:", data[:3]
# Check that both numarray objects are equal
self.assertTrue(isinstance(data, records.RecArray))
# Check that all columns have been selected
self.assertEqual(len(data), 100)
# Finally, check that the contents are ok
for i in range(100):
idata = data.field('color')[i]
if i >= 50:
self.assertEqual(idata, "ab")
else:
self.assertEqual(idata, "a")
def test09c_getStrings(self):
"""Checking the return of string columns with spaces. (append)"""
if self.close:
self.fileh.close()
self.fileh = openFile(self.file, "a")
table = self.fileh.root.table
row = table.row
for i in range(50):
row["color"] = "a " # note the trailing spaces
row.append()
table.flush()
if self.close:
self.fileh.close()
self.fileh = openFile(self.file, "a")
data = self.fileh.root.table[:]
if common.verbose:
print "Type of read:", type(data)
print "Description of the record:", data.descr
print "First 3 elements of read:", data[:3]
# Check that both numarray objects are equal
self.assertTrue(isinstance(data, records.RecArray))
# Check that all columns have been selected
self.assertEqual(len(data), 150)
        # Finally, check that the contents are ok
for i in range(150):
idata = data.field('color')[i]
if i < 100:
self.assertEqual(idata, "ab")
else:
self.assertEqual(idata, "a")
class TableNativeFlavorOpenTestCase(TableNativeFlavorTestCase):
close = 0
class TableNativeFlavorCloseTestCase(TableNativeFlavorTestCase):
close = 1
class StrlenTestCase(common.PyTablesTestCase):
def setUp(self):
# Create an instance of an HDF5 Table
self.file = tempfile.mktemp(".h5")
self.fileh = openFile(self.file, "w")
group = self.fileh.createGroup(self.fileh.root, 'group')
tablelayout = {'Text': StringCol(itemsize=1000),}
self.table = self.fileh.createTable(group, 'table', tablelayout)
self.table.flavor = 'numarray'
row = self.table.row
row['Text'] = 'Hello Francesc!'
row.append()
row['Text'] = 'Hola Francesc!'
row.append()
self.table.flush()
def tearDown(self):
self.fileh.close()
os.remove(self.file)
common.cleanup(self)
def test01(self):
"""Checking the lengths of strings (read field)."""
if self.close:
self.fileh.close()
self.fileh = openFile(self.file, "a")
self.table = self.fileh.root.group.table
# Get both strings
str1 = self.table.col('Text')[0]
str2 = self.table.col('Text')[1]
if common.verbose:
print "string1-->", str1
print "string2-->", str2
# Check that both numarray objects are equal
self.assertEqual(len(str1), len('Hello Francesc!'))
self.assertEqual(len(str2), len('Hola Francesc!'))
self.assertEqual(str1, 'Hello Francesc!')
self.assertEqual(str2, 'Hola Francesc!')
def test02(self):
"""Checking the lengths of strings (read recarray)."""
if self.close:
self.fileh.close()
self.fileh = openFile(self.file, "a")
self.table = self.fileh.root.group.table
# Get both strings
str1 = self.table[:].field('Text')[0]
str2 = self.table[:].field('Text')[1]
# Check that both numarray objects are equal
self.assertEqual(len(str1), len('Hello Francesc!'))
self.assertEqual(len(str2), len('Hola Francesc!'))
self.assertEqual(str1, 'Hello Francesc!')
self.assertEqual(str2, 'Hola Francesc!')
def test03(self):
"""Checking the lengths of strings (read recarray, row by row)."""
if self.close:
self.fileh.close()
self.fileh = openFile(self.file, "a")
self.table = self.fileh.root.group.table
# Get both strings
str1 = self.table[0].field('Text')
str2 = self.table[1].field('Text')
# Check that both numarray objects are equal
self.assertEqual(len(str1), len('Hello Francesc!'))
self.assertEqual(len(str2), len('Hola Francesc!'))
self.assertEqual(str1, 'Hello Francesc!')
self.assertEqual(str2, 'Hola Francesc!')
class StrlenOpenTestCase(StrlenTestCase):
close = 0
class StrlenCloseTestCase(StrlenTestCase):
close = 1
class ScalarTestCase(common.TempFileMixin, common.PyTablesTestCase):
def test(self):
"""Reading scalar arrays (see #98)."""
arr = self.h5file.createArray('/', 'scalar_na', 1234)
arr.flavor = 'numarray'
self._reopen()
arr = self.h5file.root.scalar_na
common.verbosePrint("* %r == %r ?" % (arr.read(), array(1234)))
self.assertTrue(all(arr.read() == array(1234)))
common.verbosePrint("* %r == %r ?" % (arr[()], array(1234)))
self.assertTrue(all(arr[()] == 1234))
#--------------------------------------------------------
def suite():
theSuite = unittest.TestSuite()
niter = 1
#theSuite.addTest(unittest.makeSuite(StrlenOpenTestCase))
#theSuite.addTest(unittest.makeSuite(Basic0DOneTestCase))
#theSuite.addTest(unittest.makeSuite(GroupsArrayTestCase))
for i in range(niter):
theSuite.addTest(unittest.makeSuite(Basic0DOneTestCase))
theSuite.addTest(unittest.makeSuite(Basic0DTwoTestCase))
theSuite.addTest(unittest.makeSuite(Basic1DOneTestCase))
theSuite.addTest(unittest.makeSuite(Basic1DTwoTestCase))
theSuite.addTest(unittest.makeSuite(Basic1DThreeTestCase))
theSuite.addTest(unittest.makeSuite(Basic2DTestCase))
theSuite.addTest(unittest.makeSuite(GroupsArrayTestCase))
theSuite.addTest(unittest.makeSuite(TableReadTestCase))
theSuite.addTest(unittest.makeSuite(TableNativeFlavorOpenTestCase))
theSuite.addTest(unittest.makeSuite(TableNativeFlavorCloseTestCase))
theSuite.addTest(unittest.makeSuite(StrlenOpenTestCase))
theSuite.addTest(unittest.makeSuite(StrlenCloseTestCase))
theSuite.addTest(unittest.makeSuite(ScalarTestCase))
if common.heavy:
theSuite.addTest(unittest.makeSuite(Basic10DTestCase))
return theSuite
if __name__ == '__main__':
unittest.main( defaultTest='suite' )
```
#### File: tables/tests/test_types.py
```python
import sys
import unittest
import os
from tables import *
from tables.tests import common
# To delete the internal attributes automagically
unittest.TestCase.tearDown = common.cleanup
# Test Record class
class Record(IsDescription):
var1 = StringCol(itemsize=4) # 4-character String
var2 = Col.from_kind('int') # integer
var3 = Col.from_kind('int', itemsize=2) # short integer
var4 = Col.from_kind('float') # double (double-precision)
var5 = Col.from_kind('float', itemsize=4) # float (single-precision)
class RangeTestCase(unittest.TestCase):
file = "test.h5"
title = "This is the table title"
expectedrows = 100
maxshort = 2 ** 15
maxint = 2147483648 # (2 ** 31)
compress = 0
def setUp(self):
# Create an instance of HDF5 Table
self.fileh = openFile(self.file, mode = "w")
self.rootgroup = self.fileh.root
# Create a table
self.table = self.fileh.createTable(self.rootgroup, 'table',
Record, self.title)
def tearDown(self):
self.fileh.close()
os.remove(self.file)
common.cleanup(self)
#----------------------------------------
def test00_range(self):
"""Testing the range check"""
rec = self.table.row
# Save a record
i = self.maxshort
rec['var1'] = '%04d' % (i)
rec['var2'] = i
rec['var3'] = i
rec['var4'] = float(i)
rec['var5'] = float(i)
try:
rec.append()
except ValueError:
if common.verbose:
(type, value, traceback) = sys.exc_info()
print "\nGreat!, the next ValueError was catched!"
print value
pass
else:
if common.verbose:
print "\nNow, the range overflow no longer issues a ValueError"
def test01_type(self):
"""Testing the type check"""
rec = self.table.row
# Save a record
i = self.maxshort
rec['var1'] = '%04d' % (i)
rec['var2'] = i
rec['var3'] = i % self.maxshort
rec['var5'] = float(i)
try:
rec['var4'] = "124c"
except TypeError:
if common.verbose:
(type, value, traceback) = sys.exc_info()
print "\nGreat!, the next TypeError was catched!"
print value
pass
else:
print rec
self.fail("expected a TypeError")
# Check the dtype read-only attribute
class DtypeTestCase(common.TempFileMixin, common.PyTablesTestCase):
def test00a_table(self):
"""Check dtype accessor for Table objects"""
a = self.h5file.createTable('/', 'table', Record)
self.assertEqual(a.dtype, a.description._v_dtype)
def test00b_column(self):
"""Check dtype accessor for Column objects"""
a = self.h5file.createTable('/', 'table', Record)
c = a.cols.var3
self.assertEqual(c.dtype, a.description._v_dtype['var3'])
def test01_array(self):
"""Check dtype accessor for Array objects"""
a = self.h5file.createArray('/', 'array', [1,2])
self.assertEqual(a.dtype, a.atom.dtype)
def test02_carray(self):
"""Check dtype accessor for CArray objects"""
a = self.h5file.createCArray('/', 'array', FloatAtom(), [1,2])
self.assertEqual(a.dtype, a.atom.dtype)
def test03_carray(self):
"""Check dtype accessor for EArray objects"""
a = self.h5file.createEArray('/', 'array', FloatAtom(), [0,2])
self.assertEqual(a.dtype, a.atom.dtype)
def test04_vlarray(self):
"""Check dtype accessor for VLArray objects"""
a = self.h5file.createVLArray('/', 'array', FloatAtom())
self.assertEqual(a.dtype, a.atom.dtype)
#----------------------------------------------------------------------
def suite():
import doctest
import tables.atom
theSuite = unittest.TestSuite()
for i in range(1):
theSuite.addTest(doctest.DocTestSuite(tables.atom))
theSuite.addTest(unittest.makeSuite(RangeTestCase))
theSuite.addTest(unittest.makeSuite(DtypeTestCase))
return theSuite
if __name__ == '__main__':
unittest.main( defaultTest='suite' )
```
#### File: PyTables/tables/undoredo.py
```python
from tables.path import splitPath
__docformat__ = 'reStructuredText'
"""The format of documentation strings in this module."""
__version__ = '$Revision$'
"""Repository version of this file."""
def undo(file_, operation, *args):
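    """Undo a logged *operation* (CREATE, REMOVE, MOVE, ADDATTR or DELATTR) on *file_*."""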
if operation == 'CREATE':
undoCreate(file_, args[0])
elif operation == 'REMOVE':
undoRemove(file_, args[0])
elif operation == 'MOVE':
undoMove(file_, args[0], args[1])
elif operation == 'ADDATTR':
undoAddAttr(file_, args[0], args[1])
elif operation == 'DELATTR':
undoDelAttr(file_, args[0], args[1])
else:
raise NotImplementedError("""\
the requested unknown operation %r can not be undone; \
please report this to the authors""" % operation)
def redo(file_, operation, *args):
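    """Redo a logged *operation* (CREATE, REMOVE, MOVE, ADDATTR or DELATTR) on *file_*."""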
if operation == 'CREATE':
redoCreate(file_, args[0])
elif operation == 'REMOVE':
redoRemove(file_, args[0])
elif operation == 'MOVE':
redoMove(file_, args[0], args[1])
elif operation == 'ADDATTR':
redoAddAttr(file_, args[0], args[1])
elif operation == 'DELATTR':
redoDelAttr(file_, args[0], args[1])
else:
raise NotImplementedError("""\
the requested unknown operation %r can not be redone; \
please report this to the authors""" % operation)
def moveToShadow(file_, path):
node = file_._getNode(path)
(shparent, shname) = file_._shadowName()
node._g_move(shparent, shname)
def moveFromShadow(file_, path):
(shparent, shname) = file_._shadowName()
node = shparent._f_getChild(shname)
(pname, name) = splitPath(path)
parent = file_._getNode(pname)
node._g_move(parent, name)
def undoCreate(file_, path):
moveToShadow(file_, path)
def redoCreate(file_, path):
moveFromShadow(file_, path)
def undoRemove(file_, path):
moveFromShadow(file_, path)
def redoRemove(file_, path):
moveToShadow(file_, path)
def undoMove(file_, origpath, destpath):
(origpname, origname) = splitPath(origpath)
node = file_._getNode(destpath)
origparent = file_._getNode(origpname)
node._g_move(origparent, origname)
def redoMove(file_, origpath, destpath):
(destpname, destname) = splitPath(destpath)
node = file_._getNode(origpath)
destparent = file_._getNode(destpname)
node._g_move(destparent, destname)
def attrToShadow(file_, path, name):
node = file_._getNode(path)
attrs = node._v_attrs
value = getattr(attrs, name)
(shparent, shname) = file_._shadowName()
shattrs = shparent._v_attrs
# Set the attribute only if it has not been kept in the shadow.
# This avoids re-pickling complex attributes on REDO.
if not shname in shattrs:
shattrs._g__setattr(shname, value)
attrs._g__delattr(name)
def attrFromShadow(file_, path, name):
(shparent, shname) = file_._shadowName()
shattrs = shparent._v_attrs
value = getattr(shattrs, shname)
node = file_._getNode(path)
node._v_attrs._g__setattr(name, value)
# Keeping the attribute in the shadow allows reusing it on Undo/Redo.
##shattrs._g__delattr(shname)
def undoAddAttr(file_, path, name):
attrToShadow(file_, path, name)
def redoAddAttr(file_, path, name):
attrFromShadow(file_, path, name)
def undoDelAttr(file_, path, name):
attrFromShadow(file_, path, name)
def redoDelAttr(file_, path, name):
attrToShadow(file_, path, name)
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## End:
``` |
{
"source": "joshmoore/slicedimage",
"score": 2
} |
#### File: slicedimage/backends/_caching.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import hashlib
import io
import warnings
from diskcache import Cache
from ._base import Backend
SIZE_LIMIT = 5e9
CACHE_VERSION = "v0"
class CachingBackend(Backend):
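    """Backend wrapper that serves checksum-verified reads from a local disk cache."""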
def __init__(self, cacheroot, authoritative_backend):
self._cacheroot = cacheroot
self._authoritative_backend = authoritative_backend
self._cache = Cache(cacheroot, size_limit=int(SIZE_LIMIT))
def read_contextmanager(self, name, checksum_sha256=None, seekable=False):
if checksum_sha256 is not None:
return _CachingBackendContextManager(
self._authoritative_backend, self._cache, name, checksum_sha256)
else:
return self._authoritative_backend.read_contextmanager(
name, checksum_sha256, seekable=seekable)
def write_file_handle(self, name):
return self._authoritative_backend.write_file_handle(name)
class _CachingBackendContextManager(object):
def __init__(self, authoritative_backend, cache, name, checksum_sha256):
self.authoritative_backend = authoritative_backend
self.cache = cache
self.name = name
self.checksum_sha256 = checksum_sha256
self.handle = None
def __enter__(self):
cache_key = "{}-{}".format(CACHE_VERSION, self.checksum_sha256)
try:
file_data = self.cache.read(cache_key)
except KeyError:
# not in cache :(
with self.authoritative_backend.read_contextmanager(self.name) as sfh:
file_data = sfh.read()
# TODO: consider removing this if we land a more generalized solution that
# protects against corruption regardless of backend.
sha256 = hashlib.sha256(file_data).hexdigest()
if sha256 != self.checksum_sha256:
warnings.warn(
"Checksum of tile data does not match the manifest checksum! Not "
"writing to cache")
else:
self.cache.set(cache_key, file_data)
self.handle = io.BytesIO(file_data)
else:
# If the data is small enough, the DiskCache library returns the cache data
# as bytes instead of a buffered reader.
# In that case, we want to wrap it in a file-like object.
if isinstance(file_data, io.IOBase):
self.handle = file_data
else:
self.handle = io.BytesIO(file_data)
return self.handle.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
if self.handle is not None:
return self.handle.__exit__(exc_type, exc_val, exc_tb)
```
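The checksum-guarded caching pattern above can be exercised without a real remote store. In the sketch below, `FakeBackend` and the plain-dict cache are stand-ins invented for illustration; they are not slicedimage classes.
```python
# Self-contained sketch of checksum-verified caching (illustrative only).
import hashlib
import io
from contextlib import contextmanager

class FakeBackend(object):
    """Pretend authoritative backend serving in-memory bytes by name."""
    def __init__(self, blobs):
        self._blobs = blobs

    @contextmanager
    def read_contextmanager(self, name):
        yield io.BytesIO(self._blobs[name])

def cached_read(backend, cache, name, checksum_sha256):
    """Return the file's bytes, caching them under the checksum once verified."""
    key = "v0-{}".format(checksum_sha256)
    if key in cache:
        return cache[key]
    with backend.read_contextmanager(name) as fh:
        data = fh.read()
    if hashlib.sha256(data).hexdigest() == checksum_sha256:
        cache[key] = data            # only verified data enters the cache
    return data

blob = b"tile-bytes"
backend = FakeBackend({"tile_0.tiff": blob})
cache = {}
digest = hashlib.sha256(blob).hexdigest()
assert cached_read(backend, cache, "tile_0.tiff", digest) == blob
assert "v0-" + digest in cache       # a second read would now come from the cache
```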
#### File: slicedimage/slicedimage/urlpath.py
```python
import posixpath
from six.moves import urllib
def pathsplit(url):
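    """Split *url* into (url of the containing directory, basename of its path)."""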
parsed = urllib.parse.urlparse(url)
return (
urllib.parse.urlunparse(
(parsed.scheme,
parsed.netloc,
posixpath.dirname(parsed.path),
parsed.params,
parsed.query,
parsed.fragment),
),
posixpath.basename(parsed.path),
)
def pathjoin(url, *segments):
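    """Return *url* with *segments* joined onto its path component."""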
parsed = urllib.parse.urlparse(url)
return urllib.parse.urlunparse(
(parsed.scheme,
parsed.netloc,
posixpath.join(parsed.path, *segments),
parsed.params,
parsed.query,
parsed.fragment))
```
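A short usage sketch of the two helpers above (the URL is made up; the snippet assumes the slicedimage package is importable):
```python
from slicedimage.urlpath import pathjoin, pathsplit

print(pathsplit("https://example.com/data/fov_001/hybridization.json"))
# ('https://example.com/data/fov_001', 'hybridization.json')

print(pathjoin("https://example.com/data", "fov_001", "hybridization.json"))
# https://example.com/data/fov_001/hybridization.json
```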
#### File: io/v0_0_0/test_write.py
```python
import codecs
import json
import numpy
import os
import tempfile
import unittest
import slicedimage
from tests.utils import TemporaryDirectory
baseurl = "file://{}".format(os.path.abspath(os.path.dirname(__file__)))
class TestWrite(unittest.TestCase):
def test_write_tileset(self):
image = slicedimage.TileSet(
["x", "y", "ch", "hyb"],
{'ch': 2, 'hyb': 2},
(100, 100),
)
for hyb in range(2):
for ch in range(2):
tile = slicedimage.Tile(
{
'x': (0.0, 0.01),
'y': (0.0, 0.01),
},
{
'hyb': hyb,
'ch': ch,
},
)
tile.numpy_array = numpy.zeros((100, 100))
tile.numpy_array[hyb, ch] = 1
image.add_tile(tile)
with TemporaryDirectory() as tempdir, \
tempfile.NamedTemporaryFile(suffix=".json", dir=tempdir) as partition_file:
partition_doc = slicedimage.v0_0_0.Writer().generate_partition_document(
image, partition_file.name)
writer = codecs.getwriter("utf-8")
json.dump(partition_doc, writer(partition_file))
partition_file.flush()
basename = os.path.basename(partition_file.name)
baseurl = "file://{}".format(os.path.dirname(partition_file.name))
loaded = slicedimage.Reader.parse_doc(basename, baseurl)
for hyb in range(2):
for ch in range(2):
tiles = [_tile
for _tile in loaded.tiles(
lambda tile: (tile.indices['hyb'] == hyb and
tile.indices['ch'] == ch))]
self.assertEqual(len(tiles), 1)
expected = numpy.zeros((100, 100))
expected[hyb, ch] = 1
self.assertEqual(tiles[0].numpy_array.all(), expected.all())
self.assertIsNotNone(tiles[0].sha256)
def test_write_collection(self):
image = slicedimage.TileSet(
["x", "y", "ch", "hyb"],
{'ch': 2, 'hyb': 2},
(100, 100),
)
for hyb in range(2):
for ch in range(2):
tile = slicedimage.Tile(
{
'x': (0.0, 0.01),
'y': (0.0, 0.01),
},
{
'hyb': hyb,
'ch': ch,
},
)
tile.numpy_array = numpy.zeros((100, 100))
tile.numpy_array[hyb, ch] = 1
image.add_tile(tile)
collection = slicedimage.Collection()
collection.add_partition("fov002", image)
with TemporaryDirectory() as tempdir, \
tempfile.NamedTemporaryFile(suffix=".json", dir=tempdir) as partition_file:
partition_doc = slicedimage.v0_0_0.Writer().generate_partition_document(
collection, partition_file.name)
writer = codecs.getwriter("utf-8")
json.dump(partition_doc, writer(partition_file))
partition_file.flush()
basename = os.path.basename(partition_file.name)
baseurl = "file://{}".format(os.path.dirname(partition_file.name))
loaded = slicedimage.Reader.parse_doc(basename, baseurl)
for hyb in range(2):
for ch in range(2):
tiles = [_tile
for _tile in loaded.tiles(
lambda tile: (tile.indices['hyb'] == hyb and
tile.indices['ch'] == ch))]
self.assertEqual(len(tiles), 1)
expected = numpy.zeros((100, 100))
expected[hyb, ch] = 1
self.assertEqual(tiles[0].numpy_array.all(), expected.all())
self.assertIsNotNone(tiles[0].sha256)
``` |
{
"source": "joshmoore/starfish",
"score": 3
} |
#### File: starfish/examples/build_sample_experiment.py
```python
import argparse
import json
from examples.support import AUX_IMAGE_NAMES, write_experiment_json
from starfish.constants import Indices
from starfish.util.argparse import FsExistsType
class StarfishIndex:
def __call__(self, spec_json):
try:
spec = json.loads(spec_json)
except json.decoder.JSONDecodeError:
raise argparse.ArgumentTypeError("Could not parse {} into a valid index specification.".format(spec_json))
return {
Indices.HYB: spec.get(Indices.HYB, 1),
Indices.CH: spec.get(Indices.CH, 1),
Indices.Z: spec.get(Indices.Z, 1),
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"output_dir",
type=FsExistsType())
parser.add_argument(
"--fov-count",
type=int,
required=True,
help="Number of FOVs in this experiment.")
parser.add_argument(
"--hybridization-dimensions",
type=StarfishIndex(),
required=True,
help="Dimensions for the hybridization images. Should be a json dict, with {}, {}, and {} as the possible "
"keys. The value should be the shape along that dimension. If a key is not present, the value is "
"assumed to be 0.".format(
Indices.HYB.value,
Indices.CH.value,
Indices.Z.value))
name_arg_map = dict()
for aux_image_name in AUX_IMAGE_NAMES:
arg = parser.add_argument(
"--{}-dimensions".format(aux_image_name),
type=StarfishIndex(),
help="Dimensions for the {} images. Should be a json dict, with {}, {}, and {} as the possible keys. The "
"value should be the shape along that dimension. If a key is not present, the value is assumed to be "
"0.".format(aux_image_name, Indices.HYB.value, Indices.CH.value, Indices.Z.value))
name_arg_map[aux_image_name] = arg.dest
args = parser.parse_args()
write_experiment_json(
args.output_dir, args.fov_count, args.hybridization_dimensions,
{
aux_image_name: getattr(args, name_arg_map[aux_image_name])
for aux_image_name in AUX_IMAGE_NAMES
}
)
```
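The `StarfishIndex` argparse type above turns a JSON string into a dimension map, with any missing axis defaulting to 1. A minimal standalone sketch of that parsing behaviour (plain string keys stand in for the `Indices` enum; the sample JSON is made up):
```python
import argparse
import json
HYB, CH, Z = "hyb", "ch", "z"  # hypothetical stand-ins for starfish.constants.Indices
def parse_index_spec(spec_json):
    try:
        spec = json.loads(spec_json)
    except json.decoder.JSONDecodeError:
        raise argparse.ArgumentTypeError(
            "Could not parse {} into a valid index specification.".format(spec_json))
    # missing keys fall back to a shape of 1, mirroring StarfishIndex above
    return {HYB: spec.get(HYB, 1), CH: spec.get(CH, 1), Z: spec.get(Z, 1)}
print(parse_index_spec('{"hyb": 4, "ch": 2}'))  # {'hyb': 4, 'ch': 2, 'z': 1}
```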
#### File: starfish/image/_stack.py
```python
import collections
import os
from functools import partial
from itertools import product
from typing import Any, Callable, Iterable, Iterator, Mapping, MutableSequence, Optional, Sequence, Tuple, Union
from warnings import warn
import numpy
import pandas as pd
from scipy.stats import scoreatpercentile
from skimage import exposure
from slicedimage import Reader, Writer
from slicedimage.io import resolve_path_or_url
from tqdm import tqdm
from starfish.constants import Coordinates, Indices
from starfish.errors import DataFormatWarning
from starfish.pipeline.features.spot_attributes import SpotAttributes
class ImageStack:
"""Container for a TileSet (field of view)
Methods
-------
get_slice retrieve a slice of the image tensor
set_slice set a slice of the image tensor
apply apply a 2d or 3d function across all Tiles in the image tensor
max_proj return a max projection over one or more axis of the image tensor
show_stack show an interactive, pageable view of the image tensor, or a slice of the image tensor
write save the (potentially modified) image tensor to disk
Properties
----------
num_chs the number of channels stored in the image tensor
num_hybs the number of hybridization rounds stored in the image tensor
num_zlayers the number of z-layers stored in the image tensor
numpy_array the 5-d image tensor is stored in this array
raw_shape the shape of the image tensor (in integers)
shape the shape of the image tensor by categorical index (channels, hybridization rounds, z-layers)
"""
AXES_MAP = {
Indices.HYB: 0,
Indices.CH: 1,
Indices.Z: 2,
}
N_AXES = max(AXES_MAP.values()) + 1
def __init__(self, image_partition):
self._image_partition = image_partition
self._num_hybs = image_partition.get_dimension_shape(Indices.HYB)
self._num_chs = image_partition.get_dimension_shape(Indices.CH)
if Indices.Z in image_partition.dimensions:
self._num_zlayers = image_partition.get_dimension_shape(Indices.Z)
else:
self._num_zlayers = 1
self._tile_shape = image_partition.default_tile_shape
# Examine the tiles to figure out the right kind (int, float, etc.) and size. We require that all the tiles
# have the same kind of data type, but we do not require that they all have the same size of data type. The
# allocated array is the highest size we encounter.
kind = None
max_size = 0
for tile in self._image_partition.tiles():
dtype = tile.numpy_array.dtype
if kind is None:
kind = dtype.kind
else:
if kind != dtype.kind:
raise TypeError("All tiles should have the same kind of dtype")
if dtype.itemsize > max_size:
max_size = dtype.itemsize
if self._tile_shape is None:
self._tile_shape = tile.tile_shape
elif tile.tile_shape is not None and self._tile_shape != tile.tile_shape:
raise ValueError("Starfish does not support tiles that are not identical in shape")
# now that we know the tile data type (kind and size), we can allocate the data array.
self._data = numpy.zeros(
shape=(self._num_hybs, self._num_chs, self._num_zlayers) + self._tile_shape,
dtype=numpy.dtype(f"{kind}{max_size}")
)
# iterate through the tiles and set the data.
for tile in self._image_partition.tiles():
h = tile.indices[Indices.HYB]
c = tile.indices[Indices.CH]
zlayer = tile.indices.get(Indices.Z, 0)
data = tile.numpy_array
if max_size != data.dtype.itemsize:
if data.dtype.kind == "i" or data.dtype.kind == "u":
# fixed point can be done with a simple multiply.
src_range = numpy.iinfo(data.dtype).max - numpy.iinfo(data.dtype).min + 1
dst_range = numpy.iinfo(self._data.dtype).max - numpy.iinfo(self._data.dtype).min + 1
data = data * (dst_range / src_range)
warn(
f"Tile "
f"(H: {tile.indices[Indices.HYB]} C: {tile.indices[Indices.CH]} Z: {tile.indices[Indices.Z]}) has "
f"dtype {data.dtype}. One or more tiles is of a larger dtype {self._data.dtype}.",
DataFormatWarning)
self.set_slice(indices={Indices.HYB: h, Indices.CH: c, Indices.Z: zlayer}, data=data)
# set_slice will mark the data as needing writeback, so we need to unset that.
self._data_needs_writeback = False
@classmethod
def from_url(cls, url: str, baseurl: Optional[str]):
"""
Constructs an ImageStack object from a URL and a base URL.
The following examples will all load from the same location:
1. url: https://www.example.com/images/hybridization.json baseurl: None
2. url: https://www.example.com/images/hybridization.json baseurl: I_am_ignored
3. url: hybridization.json baseurl: https://www.example.com/images
4. url: images/hybridization.json baseurl: https://www.example.com
Parameters:
-----------
url : str
Either an absolute URL or a relative URL referring to the image to be read.
baseurl : Optional[str]
If url is a relative URL, then this must be provided. If url is an absolute URL, then this parameter is
ignored.
"""
image_partition = Reader.parse_doc(url, baseurl)
return cls(image_partition)
@classmethod
def from_path_or_url(cls, url_or_path: str) -> "ImageStack":
"""
Constructs an ImageStack object from an absolute URL or a filesystem path.
The following examples will all load from the same location:
1. url_or_path: file:///Users/starfish-user/images/hybridization.json
2. url_or_path: /Users/starfish-user/images/hybridization.json
Parameters:
-----------
url_or_path : str
Either an absolute URL or a filesystem path to an imagestack.
"""
_, relativeurl, baseurl = resolve_path_or_url(url_or_path)
return cls.from_url(relativeurl, baseurl)
@property
def numpy_array(self):
"""Retrieves a view of the image data as a numpy array."""
result = self._data.view()
result.setflags(write=False)
return result
@numpy_array.setter
def numpy_array(self, data):
"""Sets the image's data from a numpy array. The numpy array is advised to be immutable afterwards."""
self._data = data.view()
self._data_needs_writeback = True
data.setflags(write=False)
def get_slice(
self,
indices: Mapping[Indices, Union[int, slice]]
) -> Tuple[numpy.ndarray, Sequence[Indices]]:
"""
Given a dictionary mapping the index name to either a value or a slice range, return a numpy array representing
the slice, and a list of the remaining axes beyond the normal x-y tile.
Example:
ImageStack axes: H, C, and Z with shape 3, 4, 5, respectively.
ImageStack Implicit axes: X, Y with shape 10, 20, respectively.
Called to slice with indices {Z: 5}.
Result: a 4-dimensional numpy array with shape (3, 4, 20, 10) and the remaining axes [H, C].
Example:
Original axes: H, C, and Z.
Implicit axes: X, Y.
Called to slice with indices {Z: 5, C: slice(2, 4)}.
Result: a 4-dimensional numpy array with shape (3, 2, 20, 10) and the remaining axes [H, C].
"""
slice_list, axes = self._build_slice_list(indices)
result = self._data[slice_list]
result.setflags(write=False)
return result, axes
def set_slice(
self,
indices: Mapping[Indices, Union[int, slice]],
data: numpy.ndarray,
axes: Sequence[Indices]=None):
"""
Given a dictionary mapping the index name to either a value or a slice range and a source numpy array, set the
slice of the array of this ImageStack to the values in the source numpy array. If the optional parameter axes
is provided, that represents the axes of the numpy array beyond the x-y tile.
Example:
ImageStack axes: H, C, and Z with shape 3, 4, 5, respectively.
ImageStack Implicit axes: X, Y with shape 10, 20, respectively.
Called to set a slice with indices {Z: 5}.
Data: a 4-dimensional numpy array with shape (3, 4, 20, 10)
Result: Replace the data for Z=5.
Example:
ImageStack axes: H, C, and Z. (shape 3, 4, 5)
ImageStack Implicit axes: X, Y. (shape 10, 20)
Called to set a slice with indices {Z: 5, C: slice(2, 4)}.
Data: a 4-dimensional numpy array with shape (3, 2, 20, 10)
Result: Replace the data for Z=5, C=2-3.
"""
slice_list, expected_axes = self._build_slice_list(indices)
if axes is not None:
if len(axes) != len(data.shape) - 2:
raise ValueError("data shape ({}) should be the axes ({}) and (x,y).".format(data.shape, axes))
move_src = list()
move_dst = list()
for src_idx, axis in enumerate(axes):
try:
dst_idx = expected_axes.index(axis)
except ValueError:
raise ValueError("Unexpected axis {}. Expecting only {}.".format(axis, expected_axes))
if src_idx != dst_idx:
move_src.append(src_idx)
move_dst.append(dst_idx)
if len(move_src) != 0:
data = data.view()
data = numpy.moveaxis(data, move_src, move_dst)
if self._data[slice_list].shape != data.shape:
raise ValueError("source shape {} mismatches destination shape {}".format(
data.shape, self._data[slice_list].shape))
self._data[slice_list] = data
self._data_needs_writeback = True
def show_stack(
self, indices: Mapping[Indices, Union[int, slice]],
color_map: str= 'gray', figure_size: Tuple[int, int]=(10, 10),
show_spots: Optional[SpotAttributes]=None,
rescale: bool=False, p_min: Optional[float]=None, p_max: Optional[float]=None, **kwargs):
"""Create an interactive visualization of an image stack
Produces a slider that flips through the selected volume tile-by-tile. Supports manual adjustment of dynamic
range.
Parameters
----------
indices : Mapping[Indices, Union[int, slice]],
Indices to select a volume to visualize. Passed to `Image.get_slice()`.
See `Image.get_slice()` for examples.
color_map : str (default = 'gray')
string id of a matplotlib colormap
figure_size : Tuple[int, int] (default = (10, 10))
size of the figure in inches
show_spots : Optional[SpotAttributes]
[Preliminary functionality] if provided, should be a SpotAttribute table that corresponds
to the volume being displayed. This will be paired automatically in the future.
rescale : bool (default = False)
if True, rescale the data to exclude high and low-value outliers (see skimage.exposure.rescale_intensity).
p_min: float
clip values below this intensity percentile. If provided, overrides rescale, above. (default = None)
p_max: float
clip values above this intensity percentile. If provided, overrides rescale, above. (default = None)
Raises
------
ValueError :
User must select one of rescale or p_min/p_max to adjust the image dynamic range. If both are selected, a
ValueError is raised.
Notes
-----
For this widget to function interactively in the notebook, after ipywidgets has been installed, the user must
register the widget with jupyter by typing the following command into the terminal:
jupyter nbextension enable --py widgetsnbextension
"""
from ipywidgets import interact
import matplotlib.pyplot as plt
if not indices:
raise ValueError('indices may not be an empty dict or None')
# get the requested chunk, linearize the remaining data into a sequence of tiles
data, remaining_inds = self.get_slice(indices)
# identify the dimensionality of data with all dimensions other than x, y linearized
n = int(numpy.prod(data.shape[:-2]))
# linearize the array
linear_view: numpy.ndarray = data.reshape((n,) + data.shape[-2:])
# set the labels for the linearized tiles
labels = []
for index, size in zip(remaining_inds, data.shape[:-2]):
labels.append([f'{index}{n}' for n in range(size)])
labels = list(product(*labels))
n = linear_view.shape[0]
if rescale and any((p_min, p_max)):
raise ValueError('select one of rescale and p_min/p_max to rescale image, not both.')
elif rescale:
print("Rescaling ...")
vmin, vmax = scoreatpercentile(data, (0.5, 99.5))
linear_view = exposure.rescale_intensity(
linear_view,
in_range=(vmin, vmax),
out_range=numpy.float32
).astype(numpy.float32)
elif p_min or p_max:
print("Clipping ...")
a_min, a_max = scoreatpercentile(
linear_view,
(p_min if p_min else 0, p_max if p_max else 100)
)
linear_view = numpy.clip(linear_view, a_min=a_min, a_max=a_max)
show_spot_function = self._show_spots
def show_plane(ax, plane, plane_index, cmap="gray", title=None):
ax.imshow(plane, cmap=cmap)
if show_spots:
# this is slow. This link might have something to help:
# https://bastibe.de/2013-05-30-speeding-up-matplotlib.html
show_spot_function(show_spots.data, ax=ax, z=plane_index, **kwargs)
ax.set_xticks([])
ax.set_yticks([])
if title:
ax.set_title(title)
@interact(plane_index=(0, n - 1))
def display_slice(plane_index=0):
fig, ax = plt.subplots(figsize=figure_size)
show_plane(ax, linear_view[plane_index], plane_index, title=f'{labels[plane_index]}', cmap=color_map)
plt.show()
return display_slice
@staticmethod
def _show_spots(result_df, ax, z=None, size=1, z_dist=1.5, scale_radius=5) -> None:
"""function to plot spot finding results on top of any image as hollow red circles
called spots are colored by category
Parameters:
-----------
img : np.ndarray[Any]
2-d image of any dtype
result_df : pd.Dataframe
result dataframe containing spot calls that correspond to the image channel
z : Optional[int]
If provided, z-plane to plot spot calls for. Default (None): plot all provided spots.
size : int
width of line to plot around the identified spot
z_dist : float
plot spots if within this distance of the z-plane. Ignored if z is not passed.
vmin, vmax : int
clipping thresholds for the image plot
ax, matplotlib.Axes.Axis
axis to plot spots on
"""
import matplotlib.pyplot as plt
if z is not None and 'z' in result_df.columns:
inds = numpy.abs(result_df['z'] - z) < z_dist
else:
inds = numpy.ones(result_df.shape[0]).astype(bool)
# get the data needed to plot
selected = result_df.loc[inds, ['r', 'x', 'y']]
for i in numpy.arange(selected.shape[0]):
r, x, y = selected.iloc[i, :] # radius is a duplicate, and is present twice
c = plt.Circle((x, y), r * scale_radius, color='r', linewidth=size, fill=False)
ax.add_patch(c)
def _build_slice_list(
self,
indices: Mapping[Indices, Union[int, slice]]
) -> Tuple[Tuple[Union[int, slice], ...], Sequence[Indices]]:
slice_list: MutableSequence[Union[int, slice]] = [
slice(None, None, None)
for _ in range(ImageStack.N_AXES)
]
axes = []
removed_axes = set()
for name, value in indices.items():
idx = ImageStack.AXES_MAP[name]
if not isinstance(value, slice):
removed_axes.add(name)
slice_list[idx] = value
for dimension_value, dimension_name in sorted([
(dimension_value, dimension_name)
for dimension_name, dimension_value in ImageStack.AXES_MAP.items()
]):
if dimension_name not in removed_axes:
axes.append(dimension_name)
return tuple(slice_list), axes
def _iter_indices(self, is_volume: bool=False) -> Iterator[Mapping[Indices, int]]:
"""Iterate over indices of image tiles or image volumes if is_volume is True
Parameters
----------
is_volume, bool
If True, yield indices necessary to extract volumes from self, else return
indices for tiles
Yields
------
Dict[str, int]
Mapping of dimension name to index
"""
for hyb in numpy.arange(self.shape[Indices.HYB]):
for ch in numpy.arange(self.shape[Indices.CH]):
if is_volume:
yield {Indices.HYB: hyb, Indices.CH: ch}
else:
for z in numpy.arange(self.shape[Indices.Z]):
yield {Indices.HYB: hyb, Indices.CH: ch, Indices.Z: z}
def _iter_tiles(
self, indices: Iterable[Mapping[Indices, Union[int, slice]]]
) -> Iterable[numpy.ndarray]:
"""Given an iterable of indices, return a generator of numpy arrays from self
Parameters
----------
indices, Iterable[Mapping[str, int]]
Iterable of indices that map a dimension (str) to a value (int)
Yields
------
numpy.ndarray
Numpy array that corresponds to provided indices
"""
for inds in indices:
array, axes = self.get_slice(inds)
yield array
def apply(self, func, is_volume=False, in_place=True, verbose: bool=False, **kwargs):
"""Apply func over all tiles or volumes in self
Parameters
----------
func : Callable
Function to apply. must expect a first argument which is a 2d or 3d numpy array (see is_volume) and return a
numpy.ndarray. If inplace is True, must return an array of the same shape.
is_volume : bool
(default False) If True, pass 3d volumes (x, y, z) to func
in_place : bool
(default True) If True, function is executed in place. If n_proc is not 1, the tile or
volume will be copied once during execution. If false, the outputs of the function executed on individual
tiles or volumes will be output as a list
verbose : bool
If True, report on the percentage completed (default = False) during processing
kwargs : dict
Additional arguments to pass to func
Returns
-------
Optional[List[Tuple[np.ndarray, Mapping[Indices, Union[int, slice]]]]]
If inplace is False, return the results of applying func to stored image data
"""
mapfunc: Callable = map # TODO: ambrosejcarr posix-compliant multiprocessing
indices = list(self._iter_indices(is_volume=is_volume))
if verbose:
tiles = tqdm(self._iter_tiles(indices))
else:
tiles = self._iter_tiles(indices)
applyfunc: Callable = partial(func, **kwargs)
results = mapfunc(applyfunc, tiles)
# TODO ttung: this should return an ImageStack, not a bunch of indices.
if not in_place:
return list(zip(results, indices))
for r, inds in zip(results, indices):
self.set_slice(inds, r)
@property
def tile_metadata(self) -> pd.DataFrame:
"""return a table containing Tile metadata
Returns
-------
pd.DataFrame :
dataframe containing per-tile metadata information for each image. Guaranteed to include information on
channel, hybridization round, z_layer, and barcode index. Also contains any information stored in the
extras field for each tile in hybridization.json
"""
data: collections.defaultdict = collections.defaultdict(list)
index_keys = set(
key
for tile in self._image_partition.tiles()
for key in tile.indices.keys())
extras_keys = set(
key
for tile in self._image_partition.tiles()
for key in tile.extras.keys())
duplicate_keys = index_keys.intersection(extras_keys)
if len(duplicate_keys) > 0:
duplicate_keys_str = ", ".join([str(key) for key in duplicate_keys])
raise ValueError(
f"keys ({duplicate_keys_str}) was found in both the Tile specification and extras field. Tile "
f"specification keys may not be duplicated in the extras field.")
for tile in self._image_partition.tiles():
for k in index_keys:
data[k].append(tile.indices.get(k, None))
for k in extras_keys:
data[k].append(tile.extras.get(k, None))
if 'barcode_index' not in tile.extras:
hyb = tile.indices[Indices.HYB]
ch = tile.indices[Indices.CH]
z = tile.indices.get(Indices.Z, 0)
barcode_index = (((z * self.num_hybs) + hyb) * self.num_chs) + ch
data['barcode_index'].append(barcode_index)
return pd.DataFrame(data)
@property
def raw_shape(self) -> Tuple[int]:
"""
Returns the full shape of the image tensor, including the tile dimensions. For instance, an X-Y image in a
C-H-Y-X space has a raw shape covering the dimensions C, H, Y, and X.
Returns
-------
Tuple[int] :
The sizes of the indices.
"""
return self._data.shape
@property
def shape(self) -> collections.OrderedDict:
"""
Returns the shape of the image tensor as an ordered mapping from index name to size, including the tile
dimensions y and x. For instance, an X-Y image in a C-H-Y-X space yields entries for C, H, y, and x.
Returns
-------
An ordered mapping between index names to the size of the index.
"""
# TODO: (ttung) Note that the return type should be ..OrderedDict[Any, str], but python3.6 has a bug where this
# breaks horribly. Can't find a bug id to link to, but see
# https://stackoverflow.com/questions/41207128/how-do-i-specify-ordereddict-k-v-types-for-mypy-type-annotation
result: collections.OrderedDict[Any, str] = collections.OrderedDict()
for name, idx in ImageStack.AXES_MAP.items():
result[name] = self._data.shape[idx]
result['y'] = self._data.shape[-2]
result['x'] = self._data.shape[-1]
return result
@property
def num_hybs(self):
return self._num_hybs
@property
def num_chs(self):
return self._num_chs
@property
def num_zlayers(self):
return self._num_zlayers
@property
def tile_shape(self):
return self._tile_shape
def write(self, filepath: str, tile_opener=None) -> None:
"""write the image tensor to disk
Parameters
----------
filepath : str
path + prefix for writing the image tensor
tile_opener : TODO ttung: doc me.
"""
if self._data_needs_writeback:
for tile in self._image_partition.tiles():
h = tile.indices[Indices.HYB]
c = tile.indices[Indices.CH]
zlayer = tile.indices.get(Indices.Z, 0)
tile.numpy_array, axes = self.get_slice(indices={Indices.HYB: h, Indices.CH: c, Indices.Z: zlayer})
assert len(axes) == 0
self._data_needs_writeback = False
seen_x_coords, seen_y_coords, seen_z_coords = set(), set(), set()
for tile in self._image_partition.tiles():
seen_x_coords.add(tile.coordinates[Coordinates.X])
seen_y_coords.add(tile.coordinates[Coordinates.Y])
z_coords = tile.coordinates.get(Coordinates.Z, None)
if z_coords is not None:
seen_z_coords.add(z_coords)
sorted_x_coords = sorted(seen_x_coords)
sorted_y_coords = sorted(seen_y_coords)
sorted_z_coords = sorted(seen_z_coords)
x_coords_to_idx = {coords: idx for idx, coords in enumerate(sorted_x_coords)}
y_coords_to_idx = {coords: idx for idx, coords in enumerate(sorted_y_coords)}
z_coords_to_idx = {coords: idx for idx, coords in enumerate(sorted_z_coords)}
if tile_opener is None:
def tile_opener(tileset_path, tile, ext):
tile_basename = os.path.splitext(tileset_path)[0]
xcoord = tile.coordinates[Coordinates.X]
ycoord = tile.coordinates[Coordinates.Y]
zcoord = tile.coordinates.get(Coordinates.Z, None)
xcoord = tuple(xcoord) if isinstance(xcoord, list) else xcoord
ycoord = tuple(ycoord) if isinstance(ycoord, list) else ycoord
xval = x_coords_to_idx[xcoord]
yval = y_coords_to_idx[ycoord]
if zcoord is not None:
zval = z_coords_to_idx[zcoord]
zstr = "-Z{}".format(zval)
else:
zstr = ""
return open(
"{}-X{}-Y{}{}-H{}-C{}.{}".format(
tile_basename,
xval,
yval,
zstr,
tile.indices[Indices.HYB],
tile.indices[Indices.CH],
ext,
),
"wb")
Writer.write_to_path(
self._image_partition,
filepath,
pretty=True,
tile_opener=tile_opener)
def max_proj(self, *dims: Indices) -> numpy.ndarray:
"""return a max projection over one or more axis of the image tensor
Parameters
----------
dims : Indices
one or more axes to project over
Returns
-------
numpy.ndarray :
max projection
"""
axes = list()
for dim in dims:
try:
axes.append(ImageStack.AXES_MAP[dim])
except KeyError:
raise ValueError(
"Dimension: {} not supported. Expecting one of: {}".format(dim, ImageStack.AXES_MAP.keys()))
return numpy.max(self._data, axis=tuple(axes))
def squeeze(self) -> numpy.ndarray:
"""return an array that is linear over categorical dimensions and z
Returns
-------
np.ndarray :
array of shape (num_hybs * num_chs * num_zlayers, x, y).
"""
first_dim = self.num_hybs * self.num_chs * self.num_zlayers
new_shape = (first_dim,) + self.tile_shape
new_data = self.numpy_array.reshape(new_shape)
return new_data
def un_squeeze(self, stack):
if isinstance(stack, list):
stack = numpy.array(stack)
new_shape = (self.num_hybs, self.num_chs, self.num_zlayers) + self.tile_shape
res = stack.reshape(new_shape)
return res
```
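A rough usage sketch for the `ImageStack` API defined above; the JSON path is hypothetical and assumes a hybridization tileset already written in the slicedimage format:
```python
from starfish.constants import Indices
from starfish.image import ImageStack
# hypothetical path to a hybridization.json written by slicedimage
stack = ImageStack.from_path_or_url("/data/experiment/fov_000/hybridization.json")
print(stack.shape)  # ordered mapping of hyb/ch/z sizes plus y and x
print(stack.num_hybs, stack.num_chs, stack.num_zlayers)
# pull out one z-plane across all hybs and channels (see the get_slice docstring)
plane, remaining_axes = stack.get_slice({Indices.Z: 0})
# collapse the z axis into a single 2-d image per (hyb, ch)
projection = stack.max_proj(Indices.Z)
```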
#### File: features/pixels/_base.py
```python
from starfish.pipeline.algorithmbase import AlgorithmBase
class PixelFinderAlgorithmBase(AlgorithmBase):
def find(self, stack):
"""Find spots."""
raise NotImplementedError()
```
#### File: spots/detector/local_max_peak_finder.py
```python
from typing import List
import numpy as np
from trackpy import locate
from starfish.image import ImageStack
from starfish.pipeline.features.spot_attributes import SpotAttributes
from ._base import SpotFinderAlgorithmBase
class LocalMaxPeakFinder(SpotFinderAlgorithmBase):
def __init__(
self, spot_diameter, min_mass, max_size, separation, percentile=0, noise_size=None, smoothing_size=None,
threshold=None, preprocess: bool=False, is_volume: bool=False, verbose=False, **kwargs
) -> None:
"""Local max peak finding algorithm
This is a wrapper for `trackpy.locate`
Parameters
----------
spot_diameter : odd integer or tuple of odd integers.
This may be a single number or a tuple giving the feature’s extent in each dimension, useful when the
dimensions do not have equal resolution (e.g. confocal microscopy). The tuple order is the same as the
image shape, conventionally (z, y, x) or (y, x). The number(s) must be odd integers. When in doubt, round
up.
min_mass : float, optional
The minimum integrated brightness. This is a crucial parameter for eliminating spurious features.
Recommended minimum values are 100 for integer images and 1 for float images. Defaults to 0 (no filtering).
max_size : float
maximum radius-of-gyration of brightness, default None
separation : float or tuple
Minimum separation between features. Default is diameter + 1. May be a tuple, see diameter for details.
percentile : float
Features must have a peak brighter than pixels in this percentile. This helps eliminate spurious peaks.
noise_size : float or tuple
Width of Gaussian blurring kernel, in pixels. Default is 1. May be a tuple, see diameter for details.
smoothing_size : float or tuple
The size of the sides of the square kernel used in boxcar (rolling average) smoothing, in pixels. Default
is diameter. May be a tuple, making the kernel rectangular.
threshold : float
Clip bandpass result below this value. Thresholding is done on the already background-subtracted image.
By default, 1 for integer images and 1/255 for float images.
preprocess : boolean
Set to False to turn off bandpass preprocessing.
max_iterations : integer
max number of loops to refine the center of mass, default 10
is_volume : bool
if True, run the algorithm on 3d volumes of the provided stack
verbose : bool
If True, report the percentage completed (default = False) during processing
See Also
--------
trackpy locate: http://soft-matter.github.io/trackpy/dev/generated/trackpy.locate.html
"""
self.diameter = spot_diameter
self.minmass = min_mass
self.maxsize = max_size
self.separation = separation
self.noise_size = noise_size
self.smoothing_size = smoothing_size
self.percentile = percentile
self.threshold = threshold
self.preprocess = preprocess
self.is_volume = is_volume
self.verbose = verbose
# # TODO ambrosejcarr: make this generalize to smFISH methods
# def encode(self, spot_attributes: SpotAttributes):
# spot_table = spot_attributes.data
# spot_table['barcode_index'] = np.ones(spot_table.shape[0])
def find_attributes(self, image: np.ndarray) -> SpotAttributes:
"""
Parameters
----------
image : np.ndarray
two- or three-dimensional numpy array containing spots to detect
Returns
-------
SpotAttributes :
spot attributes table for all detected spots
"""
attributes = locate(
image,
diameter=self.diameter,
minmass=self.minmass,
maxsize=self.maxsize,
separation=self.separation,
noise_size=self.noise_size,
smoothing_size=self.smoothing_size,
threshold=self.threshold,
percentile=self.percentile,
preprocess=self.preprocess
)
new_colnames = ['x', 'y', 'intensity', 'r', 'eccentricity', 'signal', 'raw_mass', 'ep']
if len(image.shape) == 3:
attributes.columns = ['z'] + new_colnames
else:
attributes.columns = new_colnames
attributes['spot_id'] = np.arange(attributes.shape[0])
return SpotAttributes(attributes)
def find(self, stack: ImageStack):
"""
Find spots.
Parameters
----------
stack : ImageStack
Stack where we find the spots in.
"""
spot_attributes: List[SpotAttributes] = stack.apply(
self.find_attributes, in_place=False, is_volume=self.is_volume, verbose=self.verbose)
# TODO ambrosejcarr: do we need to find spots in the aux_dict too?
# TODO ambrosejcarr: this is where development stopped; spot_attributes is correct, but translating
# spot_attributes into an encoder_table is tricky without first implementing the new codebook. Do that first.
# create an encoded table
# encoded_spots = self.encode(spot_attributes.data)
return spot_attributes
@classmethod
def add_arguments(cls, group_parser):
group_parser.add_argument("--spot-diameter", type=str, help='expected spot size')
group_parser.add_argument("--min-mass", default=4, type=int, help="minimum integrated spot intensity")
group_parser.add_argument("--max-size", default=6, type=int, help="maximum radius of gyration of brightness")
group_parser.add_argument("--separation", default=5, type=float, help="minimum distance between spots")
group_parser.add_argument(
"--noise-size", default=None, type=int, help="width of gaussian blurring kernel, in pixels")
group_parser.add_argument(
"--smoothing-size", default=None, type=int,
help="odd integer. Size of boxcar (moving average) filter in pixels. Default is the Diameter")
group_parser.add_argument(
"--preprocess", action="store_true", help="if passed, gaussian and boxcar filtering are applied")
group_parser.add_argument(
"--show", default=False, action='store_true', help="display results visually")
group_parser.add_argument(
"--percentile", default=None, type=float,
help="clip bandpass below this value. Thresholding is done on already background-subtracted images. "
"default 1 for integer images and 1/255 for float"
)
```
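A rough sketch of driving the wrapper above directly on a synthetic image; the import path is inferred from the file layout, the parameter values are made up, and trackpy must be installed (SpotAttributes may impose further column requirements not shown here):
```python
import numpy as np
from starfish.pipeline.features.spots.detector.local_max_peak_finder import LocalMaxPeakFinder
# synthetic 2-d image with a single bright blob near the centre
image = np.zeros((100, 100), dtype=np.float64)
image[48:53, 48:53] = 1.0
finder = LocalMaxPeakFinder(
    spot_diameter=5,  # must be an odd integer
    min_mass=0.1,
    max_size=6.0,
    separation=10,
)
spot_attributes = finder.find_attributes(image)
print(spot_attributes.data[["x", "y", "r"]])
```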
#### File: pipeline/filter/gaussian_high_pass.py
```python
import argparse
from functools import partial
from typing import Callable, Tuple
import numpy as np
from starfish.io import Stack
from starfish.pipeline.filter.gaussian_low_pass import GaussianLowPass
from ._base import FilterAlgorithmBase
class GaussianHighPass(FilterAlgorithmBase):
def __init__(self, sigma, **kwargs) -> None:
"""Gaussian high pass filter
Parameters
----------
sigma : int (default = 1)
standard deviation of gaussian kernel
"""
self.sigma = sigma
@classmethod
def add_arguments(cls, group_parser: argparse.ArgumentParser) -> None:
group_parser.add_argument(
"--sigma", default=1, type=int, help="standard deviation of gaussian kernel")
@staticmethod
def gaussian_high_pass(img: np.ndarray, sigma) -> np.ndarray:
"""
Applies a gaussian high pass filter to an image
Parameters
----------
img : numpy.ndarray
Image to filter
sigma : Union[float, int]
Standard deviation of gaussian kernel
Returns
-------
numpy.ndarray :
Filtered image, same shape as input
"""
blurred: np.ndarray = GaussianLowPass.low_pass(img, sigma)
over_flow_ind: np.ndarray[bool] = img < blurred
res: np.ndarray = img - blurred
res[over_flow_ind] = 0
return res
def filter(self, stack: Stack) -> None:
"""
Perform in-place filtering of an image stack and all contained aux images.
Parameters
----------
stack : starfish.Stack
Stack to be filtered.
"""
high_pass: Callable = partial(self.gaussian_high_pass, sigma=self.sigma)
stack.image.apply(high_pass)
# apply to aux dict too:
for auxiliary_image in stack.auxiliary_images.values():
auxiliary_image.apply(high_pass)
```
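The high pass above is simply the image minus a Gaussian-blurred copy, with any pixels that would go negative clipped to zero. A standalone sketch of that idea, using `scipy.ndimage.gaussian_filter` in place of `GaussianLowPass.low_pass` (which is not shown in this file):
```python
import numpy as np
from scipy.ndimage import gaussian_filter
def gaussian_high_pass_sketch(img: np.ndarray, sigma: float) -> np.ndarray:
    # subtract a low-pass (blurred) copy and clip negative values to zero
    blurred = gaussian_filter(img.astype(np.float64), sigma)
    res = img - blurred
    res[img < blurred] = 0
    return res
img = np.random.rand(64, 64)
filtered = gaussian_high_pass_sketch(img, sigma=3.0)
```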
#### File: pipeline/filter/white_tophat.py
```python
import numpy as np
from ._base import FilterAlgorithmBase
class WhiteTophat(FilterAlgorithmBase):
"""
Performs "white top hat" filtering of an image to enhance spots. "White top hat filtering" finds spots that are both
smaller and brighter than their surroundings.
See Also
--------
https://en.wikipedia.org/wiki/Top-hat_transform
"""
def __init__(self, disk_size, **kwargs):
"""Instance of a white top hat morphological masking filter which masks objects larger than `disk_size`
Parameters
----------
disk_size : int
diameter of the morphological masking disk in pixels
"""
self.disk_size = disk_size
@classmethod
def add_arguments(cls, group_parser):
group_parser.add_argument(
"--disk-size", default=15, type=int, help="diameter of morphological masking disk in pixels")
def filter(self, stack) -> None:
"""Perform in-place filtering of an image stack and all contained aux images
Parameters
----------
stack : starfish.Stack
Stack to be filtered
"""
from scipy.ndimage.filters import maximum_filter, minimum_filter
from skimage.morphology import disk
def white_tophat(image):
if image.dtype.kind != "u":
raise TypeError("images should be stored in an unsigned integer array")
structuring_element = disk(self.disk_size)
min_filtered = minimum_filter(image, footprint=structuring_element)
max_filtered = maximum_filter(min_filtered, footprint=structuring_element)
filtered_image = image - np.minimum(image, max_filtered)
return filtered_image
stack.image.apply(white_tophat)
# apply to aux dict too.
for auxiliary_image in stack.auxiliary_images.values():
auxiliary_image.apply(white_tophat)
```
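The inner `white_tophat` above is a morphological opening (a minimum filter followed by a maximum filter with a disk footprint) subtracted from the original image, which keeps only small bright features. A minimal standalone sketch on a single unsigned-integer image:
```python
import numpy as np
from scipy.ndimage import maximum_filter, minimum_filter
from skimage.morphology import disk
def white_tophat_sketch(image: np.ndarray, disk_size: int) -> np.ndarray:
    structuring_element = disk(disk_size)
    min_filtered = minimum_filter(image, footprint=structuring_element)         # erosion
    max_filtered = maximum_filter(min_filtered, footprint=structuring_element)  # dilation
    return image - np.minimum(image, max_filtered)
image = np.zeros((50, 50), dtype=np.uint16)
image[25, 25] = 1000  # a lone bright pixel survives the top hat
print(white_tophat_sketch(image, disk_size=5).max())  # 1000
```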
#### File: pipeline/gene_assignment/__init__.py
```python
import argparse
import json
from starfish.pipeline.pipelinecomponent import PipelineComponent
from starfish.util.argparse import FsExistsType
from . import _base
from . import point_in_poly
class GeneAssignment(PipelineComponent):
gene_assignment_group: argparse.ArgumentParser
@classmethod
def implementing_algorithms(cls):
return _base.GeneAssignmentAlgorithm.__subclasses__()
@classmethod
def add_to_parser(cls, subparsers):
"""Adds the gene_assignment component to the CLI argument parser."""
gene_assignment_group = subparsers.add_parser("gene_assignment")
gene_assignment_group.add_argument("--coordinates-geojson", type=FsExistsType(), required=True)
gene_assignment_group.add_argument("--spots-json", type=FsExistsType(), required=True)
gene_assignment_group.add_argument("-o", "--output", required=True)
gene_assignment_group.set_defaults(starfish_command=GeneAssignment._cli)
gene_assignment_subparsers = gene_assignment_group.add_subparsers(dest="gene_assignment_algorithm_class")
for algorithm_cls in cls.algorithm_to_class_map().values():
group_parser = gene_assignment_subparsers.add_parser(algorithm_cls.get_algorithm_name())
group_parser.set_defaults(gene_assignment_algorithm_class=algorithm_cls)
algorithm_cls.add_arguments(group_parser)
cls.gene_assignment_group = gene_assignment_group
@classmethod
def _cli(cls, args, print_help=False):
"""Runs the gene_assignment component based on parsed arguments."""
import pandas
from starfish import munge
if args.gene_assignment_algorithm_class is None or print_help:
cls.gene_assignment_group.print_help()
cls.gene_assignment_group.exit(status=2)
with open(args.coordinates_geojson, "r") as fh:
coordinates = json.load(fh)
regions = munge.geojson_to_region(coordinates)
spots = pandas.read_json(args.spots_json, orient="records")
instance = args.gene_assignment_algorithm_class(**vars(args))
result = instance.assign_genes(spots, regions)
print("Writing | cell_id | spot_id to: {}".format(args.output))
result.to_json(args.output, orient="records")
```
#### File: pipeline/segmentation/_base.py
```python
from starfish.pipeline.algorithmbase import AlgorithmBase
class SegmentationAlgorithmBase(AlgorithmBase):
def segment(self, hybridization_stack, nuclei_stack):
"""Performs registration on the stack provided."""
raise NotImplementedError()
```
#### File: starfish/starfish/stats.py
```python
import numpy as np
import pandas as pd
from functools import reduce
import scipy.ndimage.measurements as spm
from regional import many as Many
from regional import one as One
from scipy.sparse import coo_matrix
def stack_describe(stack):
num_hybs = stack.shape[0]
stats = [im_describe(stack[k, :]) for k in range(num_hybs)]
return stats
def im_describe(im):
shape = im.shape
flat_dims = reduce(lambda x, y: x * y, shape)
flat_im = np.reshape(im, flat_dims)
stats = pd.Series(flat_im).describe()
return stats.to_dict()
def label_to_regions(labels):
label_mat_coo = coo_matrix(labels)
def region_for(label_mat_coo, label):
ind = label_mat_coo.data == label
# TODO does this work in 3D?
x = label_mat_coo.row[ind]
y = label_mat_coo.col[ind]
# LOL -- in python3 zip returns an iterator. to force it to
# a list we call list(zip). In Python 2.7 this is effectively a noop
re = One(list(zip(x, y)))
return re
unique_labels = sorted(set(label_mat_coo.data))
regions = [region_for(label_mat_coo, label) for label in unique_labels]
return Many(regions)
def measure(im, labels, num_objs, measurement_type='mean'):
if measurement_type == 'mean':
res = spm.mean(im, labels, range(1, num_objs))
elif measurement_type == 'max':
res = spm.maximum(im, labels, range(1, num_objs))
else:
raise ValueError('Unsupported measurement type: {}'.format(measurement_type))
return res
def measure_stack(stack, labels, num_objs, measurement_type='mean'):
from starfish.munge import stack_to_list
ims = stack_to_list(stack)
res = [measure(im, labels, num_objs, measurement_type) for im in ims]
return res
```
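A quick sketch exercising the helpers above on a toy label image, assuming the module is importable as `starfish.stats`; the values are made up:
```python
import numpy as np
from starfish.stats import im_describe, label_to_regions, measure
labels = np.zeros((6, 6), dtype=int)
labels[0:2, 0:2] = 1  # object 1
labels[3:5, 3:5] = 2  # object 2
im = np.arange(36, dtype=float).reshape(6, 6)
print(im_describe(im)["mean"])           # summary statistics of the flattened image
regions = label_to_regions(labels)       # regional.many with one region per label
means = measure(im, labels, num_objs=3)  # per-label mean intensity for labels 1 and 2
print(means)
```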
#### File: test/image/test_slicedimage_dtype.py
```python
import warnings
import numpy
import pytest
from starfish.errors import DataFormatWarning
from starfish.test.dataset_fixtures import synthetic_stack
NUM_HYB = 2
NUM_CH = 2
NUM_Z = 2
HEIGHT = 10
WIDTH = 10
def create_tile_data_provider(dtype: numpy.number, corner_dtype: numpy.number):
"""
Makes a stack that's all of the same type, except the hyb=0,ch=0,z=0 corner, which is a different type. All the
tiles are initialized with ones.
Parameters
----------
dtype : numpy.number
The data type of all the tiles except the hyb=0,ch=0,z=0 corner.
corner_dtype
The data type of the tile in the hyb=0,ch=0,z=0 corner.
Returns
-------
ImageStack :
The image stack with the tiles initialized as described.
"""
def tile_data_provider(hyb: int, ch: int, z: int, height: int, width: int) -> numpy.ndarray:
if hyb == 0 and ch == 0 and z == 0:
return numpy.ones((height, width), dtype=corner_dtype)
else:
return numpy.ones((height, width), dtype=dtype)
return tile_data_provider
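# Note on the promotion tests below: when an int8/uint8 corner tile is promoted into a
# 32-bit array, ImageStack scales it by dst_range / src_range = 2**32 / 2**8 = 2**24
# = 16777216, which is why the expected corner is filled with 16777216 rather than 1.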
def test_multiple_tiles_of_different_kind():
with pytest.raises(TypeError):
synthetic_stack(
NUM_HYB, NUM_CH, NUM_Z,
HEIGHT, WIDTH,
tile_data_provider=create_tile_data_provider(numpy.uint32, numpy.float32),
)
def test_multiple_tiles_of_same_dtype():
stack = synthetic_stack(
NUM_HYB, NUM_CH, NUM_Z,
HEIGHT, WIDTH,
tile_data_provider=create_tile_data_provider(numpy.uint32, numpy.uint32),
)
expected = numpy.ones(
(NUM_HYB,
NUM_CH,
NUM_Z,
HEIGHT,
WIDTH), dtype=numpy.uint32)
assert numpy.array_equal(stack.numpy_array, expected)
def test_int_type_promotion():
with warnings.catch_warnings(record=True) as w:
stack = synthetic_stack(
NUM_HYB, NUM_CH, NUM_Z,
HEIGHT, WIDTH,
tile_data_provider=create_tile_data_provider(numpy.int32, numpy.int8),
)
assert len(w) == 1
assert issubclass(w[0].category, DataFormatWarning)
expected = numpy.ones(
(NUM_HYB,
NUM_CH,
NUM_Z,
HEIGHT,
WIDTH), dtype=numpy.int32)
corner = numpy.empty(
(HEIGHT,
WIDTH), dtype=numpy.int32)
corner.fill(16777216)
expected[0, 0, 0] = corner
assert numpy.array_equal(stack.numpy_array, expected)
def test_uint_type_promotion():
with warnings.catch_warnings(record=True) as w:
stack = synthetic_stack(
NUM_HYB, NUM_CH, NUM_Z,
HEIGHT, WIDTH,
tile_data_provider=create_tile_data_provider(numpy.uint32, numpy.uint8),
)
assert len(w) == 1
assert issubclass(w[0].category, DataFormatWarning)
expected = numpy.ones(
(NUM_HYB,
NUM_CH,
NUM_Z,
HEIGHT,
WIDTH), dtype=numpy.uint32)
corner = numpy.empty(
(HEIGHT,
WIDTH), dtype=numpy.uint32)
corner.fill(16777216)
expected[0, 0, 0] = corner
assert numpy.array_equal(stack.numpy_array, expected)
def test_float_type_promotion():
with warnings.catch_warnings(record=True) as w:
stack = synthetic_stack(
NUM_HYB, NUM_CH, NUM_Z,
HEIGHT, WIDTH,
tile_data_provider=create_tile_data_provider(numpy.float64, numpy.float32),
)
assert len(w) == 1
assert issubclass(w[0].category, DataFormatWarning)
expected = numpy.ones(
(NUM_HYB,
NUM_CH,
NUM_Z,
HEIGHT,
WIDTH), dtype=numpy.float64)
assert numpy.array_equal(stack.numpy_array, expected)
``` |
{
"source": "joshmorenx/CitySends",
"score": 2
} |
#### File: CitySends/blog/models.py
```python
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
class Post(models.Model):
content = models.TextField(max_length=1000)
date_posted = models.DateTimeField(default=timezone.now)
author = models.ForeignKey(User, on_delete=models.CASCADE)
likes= models.IntegerField(default=0)
dislikes= models.IntegerField(default=0)
latitude = models.FloatField(max_length=140, default=0)
longitude = models.FloatField(max_length=140, default=0)
header_image = models.ImageField(null= True, blank=True, upload_to='gallery/')
estado = models.TextField(max_length=10, default=0)
def __str__(self):
return self.content[:]
@property
def number_of_comments(self):
return Comment.objects.filter(post_connected=self).count()
class Reflejado(models.Model):
content = models.TextField(max_length=1000)
date_posted = models.DateTimeField(default=timezone.now)
author = models.ForeignKey(User, on_delete=models.CASCADE)
likes= models.IntegerField(default=0)
dislikes= models.IntegerField(default=0)
latitude = models.FloatField(max_length=140, default=0)
longitude = models.FloatField(max_length=140, default=0)
header_image = models.ImageField(null= True, blank=True, upload_to='gallery/')
mexico_states = models.TextField(max_length=1000, default=0)
def __str__(self):
return self.content[:]
@property
def number_of_comments(self):
return Comment.objects.filter(post_connected=self).count()
class Comment(models.Model):
content = models.TextField(max_length=150)
date_posted = models.DateTimeField(default=timezone.now)
author = models.ForeignKey(User, on_delete=models.CASCADE)
post_connected = models.ForeignKey(Post, on_delete=models.CASCADE)
class Preference(models.Model):
user= models.ForeignKey(User, on_delete=models.CASCADE)
post= models.ForeignKey(Post, on_delete=models.CASCADE)
value= models.IntegerField()
date= models.DateTimeField(auto_now= True)
def __str__(self):
return str(self.user) + ':' + str(self.post) +':' + str(self.value)
class Meta:
unique_together = ("user", "post", "value")
```
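A brief sketch of how these models relate, e.g. from a Django shell in a configured project; the usernames and contents are made up:
```python
from django.contrib.auth.models import User
from blog.models import Post, Comment, Preference
user = User.objects.create_user("demo_user")
post = Post.objects.create(content="Bache en la avenida", author=user,
                           latitude=19.43, longitude=-99.13, estado="CDMX")
Comment.objects.create(content="De acuerdo", author=user, post_connected=post)
print(post.number_of_comments)  # 1
# unique_together on (user, post, value) means the same preference value
# cannot be recorded twice by one user for one post
Preference.objects.create(user=user, post=post, value=1)
```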
#### File: CitySends/blog/views.py
```python
from django.shortcuts import render, get_object_or_404, redirect
from blog.models import Post, Comment, Preference, Reflejado
from users.models import Follow, Profile
import sys
from django.contrib.auth.models import User
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.db.models import Count
from .forms import NewCommentForm
from django.contrib.auth.decorators import login_required
from .serializers import UserSerializer, GroupSerializer, PostSerializer
from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from rest_framework import permissions
from rest_framework.decorators import api_view
from django.http.response import JsonResponse
from rest_framework.parsers import JSONParser
from rest_framework import status
from django import forms
#=================================================================================================================>
import pandas as pd
import geopandas as gpd
from shapely import wkt
from shapely.geometry import Polygon, Point
df = pd.read_csv('poligonos_mx.csv')  # read the csv as a plain dataframe, then convert it to a GeoDataFrame below
df['geometry'] = df['geometry'].apply(wkt.loads)
gdf = gpd.GeoDataFrame(df, crs='epsg:4326')
def dime_estado(lon, lat):
"""Return the name of the Mexican state containing (lon, lat), or 'Fuera de México' if no polygon contains it."""
estado = 'Fuera de México'
point = Point(lon, lat)
for index, row in gdf.iterrows():
if row['geometry'].contains(point):
estado = gdf['estado'][index]
break
return estado
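# Example (hypothetical coordinates): dime_estado(-99.13, 19.43) walks the state polygons
# loaded above and returns the name of the containing state, or 'Fuera de México' when the
# point falls outside every polygon.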
def is_users(post_user, logged_user):
return post_user == logged_user
PAGINATION_COUNT = 3
class PostListView(LoginRequiredMixin, ListView):
model = Post
template_name = 'blog/home.html'
context_object_name = 'posts'
ordering = ['-date_posted']
paginate_by = PAGINATION_COUNT
def get_context_data(self, **kwargs):
data = super().get_context_data(**kwargs)
all_users = []
data_counter = Post.objects.values('author')\
.annotate(author_count=Count('author'))\
.order_by('-author_count')[:6]
for aux in data_counter:
all_users.append(User.objects.filter(pk=aux['author']).first())
# if Preference.objects.get(user = self.request.user):
# data['preference'] = True
# else:
# data['preference'] = False
data['preference'] = Preference.objects.all()
# print(Preference.objects.get(user= self.request.user))
data['all_users'] = all_users
print(all_users, file=sys.stderr)
return data
def get_queryset(self):
user = self.request.user
qs = Follow.objects.filter(user=user)
follows = [user]
for obj in qs:
follows.append(obj.follow_user)
return Post.objects.filter(author__in=follows).order_by('-date_posted')
class UserPostListView(LoginRequiredMixin, ListView):
model = Post
template_name = 'blog/user_posts.html'
context_object_name = 'posts'
paginate_by = PAGINATION_COUNT
def visible_user(self):
return get_object_or_404(User, username=self.kwargs.get('username'))
def get_context_data(self, **kwargs):
visible_user = self.visible_user()
logged_user = self.request.user
print(logged_user.username == '', file=sys.stderr)
if logged_user.username == '' or logged_user is None:
can_follow = False
else:
can_follow = (Follow.objects.filter(user=logged_user,
follow_user=visible_user).count() == 0)
data = super().get_context_data(**kwargs)
data['user_profile'] = visible_user
data['can_follow'] = can_follow
return data
def get_queryset(self):
user = self.visible_user()
return Post.objects.filter(author=user).order_by('-date_posted')
def post(self, request, *args, **kwargs):
if request.user.id is not None:
follows_between = Follow.objects.filter(user=request.user,
follow_user=self.visible_user())
if 'follow' in request.POST:
new_relation = Follow(user=request.user, follow_user=self.visible_user())
if follows_between.count() == 0:
new_relation.save()
elif 'unfollow' in request.POST:
if follows_between.count() > 0:
follows_between.delete()
return self.get(self, request, *args, **kwargs)
class PostDetailView(DetailView):
model = Post
template_name = 'blog/post_detail.html'
context_object_name = 'post'
def get_context_data(self, **kwargs):
data = super().get_context_data(**kwargs)
comments_connected = Comment.objects.filter(post_connected=self.get_object()).order_by('-date_posted')
data['comments'] = comments_connected
data['form'] = NewCommentForm(instance=self.request.user)
return data
def post(self, request, *args, **kwargs):
new_comment = Comment(content=request.POST.get('content'),
author=self.request.user,
post_connected=self.get_object())
new_comment.save()
return self.get(self, request, *args, **kwargs)
class PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = Post
template_name = 'blog/post_delete.html'
context_object_name = 'post'
success_url = '/'
def test_func(self):
return is_users(self.get_object().author, self.request.user)
class PostCreateView(LoginRequiredMixin, CreateView):
model = Post
fields = ['content', 'latitude', 'longitude', 'header_image']
template_name = 'blog/post_new.html'
success_url = '/'
def form_valid(self, form):
form.instance.author = self.request.user
form.instance.estado = dime_estado(form.instance.longitude, form.instance.latitude)
return super().form_valid(form)
def get_context_data(self, **kwargs):
data = super().get_context_data(**kwargs)
data['tag_line'] = 'Add a new post'
return data
class PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = Post
fields = ['content', 'latitude', 'longitude', 'header_image']
template_name = 'blog/post_new.html'
success_url = '/'
def form_valid(self, form):
form.instance.author = self.request.user
form.instance.estado = dime_estado(form.instance.longitude, form.instance.latitude)
return super().form_valid(form)
def test_func(self):
return is_users(self.get_object().author, self.request.user)
def get_context_data(self, **kwargs):
data = super().get_context_data(**kwargs)
data['tag_line'] = 'Edit a post'
return data
class FollowsListView(ListView):
model = Follow
template_name = 'blog/follow.html'
context_object_name = 'follows'
def visible_user(self):
return get_object_or_404(User, username=self.kwargs.get('username'))
def get_queryset(self):
user = self.visible_user()
return Follow.objects.filter(user=user).order_by('-date')
def get_context_data(self, *, object_list=None, **kwargs):
data = super().get_context_data(**kwargs)
data['follow'] = 'follows'
return data
class FollowersListView(ListView):
model = Follow
template_name = 'blog/follow.html'
context_object_name = 'follows'
def visible_user(self):
return get_object_or_404(User, username=self.kwargs.get('username'))
def get_queryset(self):
user = self.visible_user()
return Follow.objects.filter(follow_user=user).order_by('-date')
def get_context_data(self, *, object_list=None, **kwargs):
data = super().get_context_data(**kwargs)
data['follow'] = 'followers'
return data
# Like Functionality====================================================================================
@login_required
def postpreference(request, postid, userpreference):
if request.method == "POST":
eachpost= get_object_or_404(Post, id=postid)
obj=''
valueobj=''
try:
obj= Preference.objects.get(user= request.user, post= eachpost)
valueobj= obj.value #value of userpreference
valueobj= int(valueobj)
userpreference= int(userpreference)
if valueobj != userpreference:
obj.delete()
upref= Preference()
upref.user= request.user
upref.post= eachpost
upref.value= userpreference
if userpreference == 1 and valueobj != 1:
eachpost.likes += 1
eachpost.dislikes -=1
elif userpreference == 2 and valueobj != 2:
eachpost.dislikes += 1
eachpost.likes -= 1
upref.save()
eachpost.save()
context= {'eachpost': eachpost,
'postid': postid}
return redirect('blog-home')
elif valueobj == userpreference:
obj.delete()
if userpreference == 1:
eachpost.likes -= 1
elif userpreference == 2:
eachpost.dislikes -= 1
eachpost.save()
context= {'eachpost': eachpost,
'postid': postid}
return redirect('blog-home')
except Preference.DoesNotExist:
upref= Preference()
upref.user= request.user
upref.post= eachpost
upref.value= userpreference
userpreference= int(userpreference)
if userpreference == 1:
eachpost.likes += 1
elif userpreference == 2:
eachpost.dislikes +=1
upref.save()
eachpost.save()
context= {'eachpost': eachpost,
'postid': postid}
return redirect('blog-home')
else:
eachpost= get_object_or_404(Post, id=postid)
context= {'eachpost': eachpost,
'postid': postid}
return redirect('blog-home')
# post search ==================================================================================>>
@login_required
def DataExtraction(request):
contenido = request.POST.get('busqueda')
usuario = request.POST.get('username')
desde = request.POST.get('date1')
hasta = request.POST.get('date2')
prefijo = request.POST.get('estado')
if contenido==None and usuario==None and desde==None and hasta==None:
return render(request,'blog/practise.html',)
elif prefijo != '' and usuario == '' and desde == '' and hasta == '':
obj = Post.objects.filter(estado__contains=prefijo)
context = {
'encontrar': obj,
}
return render(request,'blog/practise.html', context)
# only the content field is filled
elif contenido != '' and usuario == '' and desde == '' and hasta == '':
obj = Post.objects.filter(content__contains=contenido)
context = {
'encontrar': obj,
}
return render(request,'blog/practise.html', context)
# only the content and date fields are filled
elif contenido != '' and usuario == '' and desde != '' and hasta != '':
obj = Post.objects.filter(content__contains=contenido, date_posted__range=[desde, hasta],)
context = {
'encontrar': obj,
}
return render(request,'blog/practise.html', context)
# only the content and username fields are filled
elif contenido != '' and usuario != '' and desde == '' and hasta == '':
obj = Post.objects.filter(content__contains=contenido, author__username__icontains=usuario,)
context = {
'encontrar': obj,
}
return render(request,'blog/practise.html', context)
# all fields are filled
elif contenido != '' and usuario != '' and desde != '' and hasta != '':
obj = Post.objects.filter(content__contains=contenido, author__username__icontains=usuario, date_posted__range=[desde, hasta],)
context = {
'encontrar': obj,
}
return render(request,'blog/practise.html', context)
# only the username field is filled
elif contenido == '' and usuario != '' and desde == '' and hasta == '':
obj = Post.objects.filter(author__username__icontains=usuario,)
context = {
'encontrar': obj,
}
return render(request,'blog/practise.html', context)
else:
nada = 'Ingrese algún caractér en el cuadro de busquedas'
also_nada = {
'fnd': nada,
}
return render(request,'blog/practise.html', also_nada)
# end of post search ==================================================================================<<
class UserViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = User.objects.all().order_by('-date_joined')
serializer_class = UserSerializer
permission_classes = [permissions.IsAuthenticated]
class GroupViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Group.objects.all()
serializer_class = GroupSerializer
permission_classes = [permissions.IsAuthenticated]
@api_view(['GET', 'POST', 'DELETE'])
def post_list(request):
if request.method == 'GET':
posts = Post.objects.all()
title = request.query_params.get('title', None)
if title is not None:
posts = posts.filter(title__icontains=title)
posts_serializer = PostSerializer(posts, many=True)
return JsonResponse(posts_serializer.data, safe=False)
# 'safe=False' for objects serialization
elif request.method == 'POST':
post_data = JSONParser().parse(request)
post_serializer = PostSerializer(data=post_data)
if post_serializer.is_valid():
post_serializer.save()
return JsonResponse(post_serializer.data, status=status.HTTP_201_CREATED)
return JsonResponse(post_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
count = Post.objects.all().delete()
return JsonResponse({'message': '{} Posts were deleted successfully!'.format(count[0])}, status=status.HTTP_204_NO_CONTENT)
``` |
{
"source": "joshms123/wxasync",
"score": 3
} |
#### File: src/examples/simple.py
```python
import wx
from wxasync import AsyncBind, WxAsyncApp, StartCoroutine
import asyncio
from asyncio.events import get_event_loop
import time
class TestFrame(wx.Frame):
def __init__(self, parent=None):
super(TestFrame, self).__init__(parent)
vbox = wx.BoxSizer(wx.VERTICAL)
button1 = wx.Button(self, label="Submit")
self.edit = wx.StaticText(self, style=wx.ALIGN_CENTRE_HORIZONTAL|wx.ST_NO_AUTORESIZE)
self.edit_timer = wx.StaticText(self, style=wx.ALIGN_CENTRE_HORIZONTAL|wx.ST_NO_AUTORESIZE)
vbox.Add(button1, 2, wx.EXPAND|wx.ALL)
vbox.AddStretchSpacer(1)
vbox.Add(self.edit, 1, wx.EXPAND|wx.ALL)
vbox.Add(self.edit_timer, 1, wx.EXPAND|wx.ALL)
self.SetSizer(vbox)
self.Layout()
AsyncBind(wx.EVT_BUTTON, self.async_callback, button1)
StartCoroutine(self.update_clock, self)
async def async_callback(self, event):
self.edit.SetLabel("Button clicked")
await asyncio.sleep(1)
self.edit.SetLabel("Working")
await asyncio.sleep(1)
self.edit.SetLabel("Completed")
async def update_clock(self):
while True:
self.edit_timer.SetLabel(time.strftime('%H:%M:%S'))
await asyncio.sleep(0.5)
app = WxAsyncApp()
frame = TestFrame()
frame.Show()
app.SetTopWindow(frame)
loop = get_event_loop()
loop.run_until_complete(app.MainLoop())
``` |
{
"source": "joshmun/biola-video-scraper",
"score": 3
} |
#### File: joshmun/biola-video-scraper/scrape-biola.py
```python
from bs4 import BeautifulSoup
# update this path accordingly
with open("./html/module4.html") as fp:
soup = BeautifulSoup(fp, "html.parser")
allIframes = soup.find_all('iframe')
def exclude_iframe_error(tag):
return tag.name == 'iframe' and not tag.has_attr('title')
iframes = soup.find_all(exclude_iframe_error)
srcHtml = list(map(lambda frame: frame['src'], iframes))
def get_video_url(srcHtmlPath):
with open(f"./html/{srcHtmlPath}") as fp:
soup = BeautifulSoup(fp, "html.parser")
return soup.find('video')['src']
videoLinks = list(map(get_video_url, srcHtml))
file = open('vidlinks.txt', 'r+')
file.truncate(0)
for link in videoLinks:
file.write(f"{link}\n")
file.close()
``` |
{
"source": "joshnankivel/wikikit",
"score": 4
} |
#### File: wikikit/gui/main.py
```python
from tkinter import *
from tkinter.ttk import *
window = Tk()
window.title("Welcome to my training app")
window.geometry('750x500')
# label example
lbl = Label(window, text="Hello")
lbl.grid(column=0, row=0)
#lbl.pack(side=LEFT) #https://www.tutorialspoint.com/python/tk_pack.htm
# textbox entry example
txt = Entry(window, width=30)
txt.grid(column=0, row=1)
txt.focus()
#txt.pack(side=LEFT)
# button example
def button_clicked():
res = "Welcome to " + txt.get()
lbl.configure(text=res)
btn = Button(window, text="Click Me", command=button_clicked)
btn.grid(column=1, row=1)
#btn.pack(side=LEFT)
# combobox example
def selected(event=None):
cmbsel = combo.get()
lbl.configure(text=cmbsel)
combo = Combobox(window)
combo['values'] = (1,2,3,4,5,"Text")
combo.grid(column=0, row=2)
combo.bind('<<ComboboxSelected>>', selected)
#combo.pack(side=LEFT)
# checkbox example
def checked(event=None):
lbl.configure(text=str(chk_state.get()))
chk_state = BooleanVar()
chk_state.set(False)
chk = Checkbutton(window, text='Choose', var=chk_state, command=checked)
chk.grid(column=0, row=3)
#chk.pack(side=LEFT)
# radiobutton example
def radio_clicked():
lbl.configure(text=radio_selected.get())
radio_selected = IntVar()
rad1 = Radiobutton(window, text='First', value=1, variable=radio_selected, command=radio_clicked)
rad2 = Radiobutton(window, text='Second', value=2, variable=radio_selected, command=radio_clicked)
rad3 = Radiobutton(window, text='Third', value=3, variable=radio_selected, command=radio_clicked)
rad1.grid(column=0, row=4)
rad2.grid(column=0, row=5)
rad3.grid(column=0, row=6)
# text area example
from tkinter import scrolledtext
txtbox = scrolledtext.ScrolledText(window, width=40, height=10)
txtbox.insert(INSERT,'Sample pre-filled text')
#txtbox.delete(1.0,END) # sample of clearing textbox
txtbox.grid(column=0, row=7)
# messagebox example
from tkinter import messagebox
def msgbtn_clicked():
messagebox.showinfo('Info Title','Info Content')
#messagebox.showwarning('Warning Title','Warning Content')
#messagebox.showerror('Error Title','Error Content')
btn = Button(window, text='Click here for messagebox', command=msgbtn_clicked)
btn.grid(column=0, row=8)
# dialog boxes prompting for user input
def questionbtn_clicked():
res = messagebox.askyesnocancel("Question Title","Question Content")
lbl.configure(text=res)
btn1 = Button(window, text='Click here to be asked', command=questionbtn_clicked)
btn1.grid(column=0, row=9)
#res = messagebox.askquestion("Question Title","Question Content")
#res = messagebox.askyesno("Question Title","Question Content")
#res = messagebox.askyesnocancel("Question Title","Question Content")
#res = messagebox.askokcancel("Question Title","Question Content")
#res = messagebox.askretrycancel("Question Title","Question Content")
# spinbox example
def spin_changed():
lbl.configure(text=var.get())
var = IntVar()
var.set(36)
spin = Spinbox(window, from_=0, to=100, width=5, textvariable=var, command=spin_changed)
#spin = Spinbox(window, values=(3, 8, 11), width=5)
spin.grid(column=0, row=10)
# progressbar example
from tkinter.ttk import Progressbar
bar = Progressbar(window, length=500)
bar['value'] = 70 # 70 means 70%
bar.grid(column=0, row=11)
# style Progressbar
style = Style()
style.theme_use('default')
style.configure("black.Horizontal.TProgressbar", background='black')
bar2 = Progressbar(window, length=500, style='black.Horizontal.TProgressbar')
bar2['value'] = 70
bar2.grid(column=0, row=12)
# filedialog
from tkinter import filedialog
# the filetypes parameter is optional to filter by extensions
#file = filedialog.askopenfilename(filetypes = (("Text files","*.txt"),("All files","*.*")))
#lbl.configure(text=file)
#files = filedialog.askopenfilenames()
# directory
#dir = filedialog.askdirectory()
#lbl.configure(text=dir)
# menu bar
from tkinter import Menu
menu = Menu(window)
new_item = Menu(menu, tearoff=0)
new_item.add_command(label='New', command=msgbtn_clicked)
#new_item.add_separator()
new_item.add_command(label='Edit')
menu.add_cascade(label='File', menu=new_item)
window.config(menu=menu)
# tab control (Notebook)
tab_control = Notebook(window)
tab1 = Frame(tab_control)
tab2 = Frame(tab_control)
tab_control.add(tab1, text='First')
tab_control.add(tab2, text='Second')
lbl1 = Label(tab1, text='label1', padx=5, pady=5)
lbl1.grid(column=0, row=14)
lbl2 = Label(tab2, text='label2')
lbl2.grid(column=1, row=14)
#tab_control.pack(expand=1, fill='both')
tab_control.grid(column=0, row=13)
window.mainloop()
``` |
{
"source": "JoshNotWright/tacofancy-api",
"score": 2
} |
#### File: JoshNotWright/tacofancy-api/app.py
```python
from flask import Flask, make_response, request, render_template, current_app, redirect, url_for
from functools import update_wrapper
from flask_sqlalchemy import SQLAlchemy
import json
import os
import random
import requests
from os import path
from urlparse import urlparse
from bs4 import BeautifulSoup
import markdown2 as md
from datetime import timedelta
from slughifi import slughifi
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']
db = SQLAlchemy(app)
##################
## Data Models ##
##################
class BaseLayer(db.Model):
__tablename__ = 'base_layer'
url = db.Column(db.String, primary_key=True)
name = db.Column(db.String)
slug = db.Column(db.String)
recipe = db.Column(db.Text)
def __repr__(self):
return '<BaseLayer %r>' % self.name
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Condiment(db.Model):
__tablename__ = 'condiment'
url = db.Column(db.String, primary_key=True)
name = db.Column(db.String)
slug = db.Column(db.String)
recipe = db.Column(db.Text)
def __repr__(self):
return '<Condiment %r>' % self.name
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Mixin(db.Model):
__tablename__ = 'mixin'
url = db.Column(db.String, primary_key=True)
name = db.Column(db.String)
slug = db.Column(db.String)
recipe = db.Column(db.Text)
def __repr__(self):
return '<Mixin %r>' % self.name
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Seasoning(db.Model):
__tablename__ = 'seasoning'
url = db.Column(db.String, primary_key=True)
name = db.Column(db.String)
slug = db.Column(db.String)
recipe = db.Column(db.Text)
def __repr__(self):
return '<Seasoning %r>' % self.name
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Shell(db.Model):
__tablename__ = 'shell'
url = db.Column(db.String, primary_key=True)
name = db.Column(db.String)
slug = db.Column(db.String)
recipe = db.Column(db.Text)
def __repr__(self):
return '<Shell %r>' % self.name
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class FullTaco(db.Model):
__tablename__ = 'full_taco'
url = db.Column(db.String, primary_key=True)
name = db.Column(db.String)
slug = db.Column(db.String)
recipe = db.Column(db.Text)
base_layer_url = db.Column(db.String, db.ForeignKey('base_layer.url'))
base_layer = db.relationship('BaseLayer', backref=db.backref('full_taco', lazy='dynamic'))
condiment_url = db.Column(db.String, db.ForeignKey('condiment.url'))
condiment = db.relationship('Condiment', backref=db.backref('full_taco', lazy='dynamic'))
mixin_url = db.Column(db.String, db.ForeignKey('mixin.url'))
mixin = db.relationship('Mixin', backref=db.backref('full_taco', lazy='dynamic'))
seasoning_url = db.Column(db.String, db.ForeignKey('seasoning.url'))
seasoning = db.relationship('Seasoning', backref=db.backref('full_taco', lazy='dynamic'))
shell_url = db.Column(db.String, db.ForeignKey('shell.url'))
shell = db.relationship('Shell', backref=db.backref('full_taco', lazy='dynamic'))
def __repr__(self):
return '<FullTaco %r>' % self.name
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
contrib_fulltaco = db.Table(
'contrib_fulltaco',
db.Column('contrib_username', db.String, db.ForeignKey('contributor.username')),
db.Column('full_taco_url', db.String, db.ForeignKey('full_taco.url')),
)
contrib_shell = db.Table(
'contrib_shell',
db.Column('contrib_username', db.String, db.ForeignKey('contributor.username')),
db.Column('shell_url', db.String, db.ForeignKey('shell.url')),
)
contrib_seasoning = db.Table(
'contrib_seasoning',
db.Column('contrib_username', db.String, db.ForeignKey('contributor.username')),
db.Column('seasoning_url', db.String, db.ForeignKey('seasoning.url')),
)
contrib_mixin = db.Table(
'contrib_mixin',
db.Column('contrib_username', db.String, db.ForeignKey('contributor.username')),
db.Column('mixin_url', db.String, db.ForeignKey('mixin.url')),
)
contrib_condiment = db.Table(
'contrib_condiment',
db.Column('contrib_username', db.String, db.ForeignKey('contributor.username')),
db.Column('condiment_url', db.String, db.ForeignKey('condiment.url')),
)
contrib_baselayer = db.Table(
'contrib_baselayer',
db.Column('contrib_username', db.String, db.ForeignKey('contributor.username')),
db.Column('baselayer_url', db.String, db.ForeignKey('base_layer.url')),
)
class Contributor(db.Model):
__tablename__ = 'contributor'
username = db.Column(db.String, primary_key=True)
gravatar = db.Column(db.String)
full_name = db.Column(db.String)
full_tacos = db.relationship('FullTaco', secondary=contrib_fulltaco,
backref=db.backref('contributors', lazy='dynamic'))
shells = db.relationship('Shell', secondary=contrib_shell,
backref=db.backref('contributors', lazy='dynamic'))
seasonings = db.relationship('Seasoning', secondary=contrib_seasoning,
backref=db.backref('contributors', lazy='dynamic'))
mixins = db.relationship('Mixin', secondary=contrib_mixin,
backref=db.backref('contributors', lazy='dynamic'))
condiments = db.relationship('Condiment', secondary=contrib_condiment,
backref=db.backref('contributors', lazy='dynamic'))
base_layers = db.relationship('BaseLayer', secondary=contrib_baselayer,
backref=db.backref('contributors', lazy='dynamic'))
def __repr__(self):
return '<Contributor %r>' % self.username
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
#############################
## Data loading functions ##
#############################
base_url = 'https://raw.github.com/JoshNotWright/tacofancy/master'
MAPPER = {
'base_layers': BaseLayer,
'condiments': Condiment,
'mixins': Mixin,
'seasonings': Seasoning,
'shells': Shell
}
def get_cookin(model, links):
saved = []
for link in links:
full_url = '%s/%s' % (base_url, link)
recipe = requests.get(full_url)
        if recipe.status_code == 200:
soup = BeautifulSoup(md.markdown(recipe.content))
name = soup.find('h1')
if name:
name = name.text
else:
name = ' '.join(path.basename(urlparse(full_url).path).split('_')).replace('.md', '').title()
ingredient = db.session.query(model).get(full_url)
ingredient_data = {
'url': full_url,
'name': name,
'slug': slughifi(name),
'recipe': recipe.content.decode('utf-8'),
}
if not ingredient:
ingredient = model(**ingredient_data)
db.session.add(ingredient)
db.session.commit()
else:
for k,v in ingredient_data.items():
setattr(ingredient, k, v)
db.session.add(ingredient)
db.session.commit()
saved.append(ingredient)
else:
ingredient = model.query.get(full_url)
if ingredient:
db.session.delete(ingredient)
db.session.commit()
return saved
def preheat():
index = requests.get('%s/INDEX.md' % base_url)
soup = BeautifulSoup(md.markdown(index.content))
links = [a for a in soup.find_all('a') if a.get('href').endswith('.md')]
full_tacos = [f.get('href') for f in links if 'full_tacos/' in f.get('href')]
base_layers = [b.get('href') for b in links if 'base_layers/' in b.get('href')]
mixins = [m.get('href') for m in links if 'mixins/' in m.get('href')]
condiments = [c.get('href') for c in links if 'condiments/' in c.get('href')]
seasonings = [s.get('href') for s in links if 'seasonings/' in s.get('href')]
shells = [s.get('href') for s in links if 'shells/' in s.get('href')]
bases = get_cookin(BaseLayer, base_layers)
conds = get_cookin(Condiment, condiments)
seas = get_cookin(Seasoning, seasonings)
mix = get_cookin(Mixin, mixins)
shell = get_cookin(Shell, shells)
for full_taco in get_cookin(FullTaco, full_tacos):
soup = BeautifulSoup(md.markdown(full_taco.recipe))
ingredient_links = [l.get('href') for l in soup.find_all('a') if l.get('href').endswith('.md')]
for link in ingredient_links:
parts = urlparse(link).path.split('/')[-2:]
kind = MAPPER[parts[0]]
scrubbed_link = '/'.join(parts)
full_link = '%s/%s' % (base_url, scrubbed_link)
ingredient = db.session.query(kind).get(full_link)
if ingredient:
setattr(full_taco, ingredient.__tablename__, ingredient)
db.session.add(full_taco)
db.session.commit()
return None
##############################################
## Cross Domain decorator for Flask routes ##
##############################################
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
########################
## Stupid randomizer ##
########################
def fetch_random(model):
count = model.query.count()
if count:
index = random.randint(0, count - 1)
pk = db.session.query(db.distinct(model.url)).all()[index][0]
return model.query.get(pk)
else:
return None
def fetch_random_ingredients():
taco = {}
taco['seasoning'] = fetch_random(Seasoning)
taco['condiment'] = fetch_random(Condiment)
taco['mixin'] = fetch_random(Mixin)
taco['base_layer'] = fetch_random(BaseLayer)
taco['shell'] = fetch_random(Shell)
return taco
###################
## Flask routes ##
###################
@app.route('/random/', methods=['GET'])
@crossdomain(origin="*")
def random_taco():
full_taco = request.args.get('full-taco')
taco = {}
if full_taco:
taco_obj = fetch_random(FullTaco)
taco = taco_obj.as_dict()
if taco.get('condiment_url'):
taco['condiment'] = taco_obj.condiment.as_dict()
if taco.get('seasoning_url'):
taco['seasoning'] = taco_obj.seasoning.as_dict()
if taco.get('base_layer_url'):
taco['base_layer'] = taco_obj.base_layer.as_dict()
taco['base_layer']['slug'] = taco_obj.base_layer.slug
if taco.get('mixin_url'):
taco['mixin'] = taco_obj.mixin.as_dict()
if taco.get('shell_url'):
taco['shell'] = taco_obj.shell.as_dict()
else:
data = fetch_random_ingredients()
taco = {}
for k,v in data.items():
taco[k] = v.as_dict()
resp = make_response(json.dumps(taco))
resp.headers['Content-Type'] = 'application/json'
return resp
@app.route('/')
def index():
taco = fetch_random_ingredients()
taco['render_link'] = True
return render_template('permalink.html', **taco)
@app.route('/<path:path>/')
def permalink(path):
try:
base_layer, mixin, condiment, seasoning, shell = path.split('/')
except ValueError:
return redirect(url_for('index'))
taco = {}
taco['base_layer'] = BaseLayer.query.filter_by(slug=base_layer).first()
taco['mixin'] = Mixin.query.filter_by(slug=mixin).first()
taco['condiment'] = Condiment.query.filter_by(slug=condiment).first()
taco['seasoning'] = Seasoning.query.filter_by(slug=seasoning).first()
taco['shell'] = Shell.query.filter_by(slug=shell).first()
taco['render_link'] = False
return render_template('permalink.html', **taco)
def get_all_things(thing):
model = MAPPER[thing]
things = model.query.all()
return json.dumps([t.as_dict() for t in things])
def get_one_thing(thing, slug):
model = MAPPER[thing]
it = model.query.filter_by(slug=slug).first()
if it:
return json.dumps(it.as_dict())
else:
return json.dumps({'status': 'error', 'message': '%s with the slug "%s" not found' % (thing, slug)})
@app.route('/base_layers/')
@crossdomain(origin="*")
def base_layers():
resp = make_response(get_all_things('base_layers'))
resp.headers['Content-Type'] = 'application/json'
return resp
@app.route('/base_layers/<slug>/')
@crossdomain(origin="*")
def base_layer(slug):
resp = make_response(get_one_thing('base_layers', slug))
resp.headers['Content-Type'] = 'application/json'
return resp
@app.route('/mixins/')
@crossdomain(origin="*")
def mixins():
resp = make_response(get_all_things('mixins'))
resp.headers['Content-Type'] = 'application/json'
return resp
@app.route('/mixins/<slug>/')
@crossdomain(origin="*")
def mixin(slug):
resp = make_response(get_one_thing('mixins', slug))
resp.headers['Content-Type'] = 'application/json'
return resp
@app.route('/condiments/')
@crossdomain(origin="*")
def condiments():
resp = make_response(get_all_things('condiments'))
resp.headers['Content-Type'] = 'application/json'
return resp
@app.route('/condiments/<slug>/')
@crossdomain(origin="*")
def condiment(slug):
resp = make_response(get_one_thing('condiments', slug))
resp.headers['Content-Type'] = 'application/json'
return resp
@app.route('/seasonings/')
@crossdomain(origin="*")
def seasonings():
resp = make_response(get_all_things('seasonings'))
resp.headers['Content-Type'] = 'application/json'
return resp
@app.route('/seasonings/<slug>/')
@crossdomain(origin="*")
def seasoning(slug):
resp = make_response(get_one_thing('seasonings', slug))
resp.headers['Content-Type'] = 'application/json'
return resp
@app.route('/shells/')
@crossdomain(origin="*")
def shells():
resp = make_response(get_all_things('shells'))
resp.headers['Content-Type'] = 'application/json'
return resp
@app.route('/shells/<slug>/')
@crossdomain(origin="*")
def shell(slug):
resp = make_response(get_one_thing('shells', slug))
resp.headers['Content-Type'] = 'application/json'
return resp
@app.route('/contributions/')
@crossdomain(origin="*")
def contributor_list():
conts = Contributor.query.all()
resp = make_response(json.dumps([c.as_dict() for c in conts]))
resp.headers['Content-Type'] = 'application/json'
return resp
@app.route('/contributions/<username>/')
@crossdomain(origin="*")
def contributions(username):
cont = Contributor.query.filter_by(username=username).first()
if not cont:
resp = make_response(json.dumps({'error': 'Contributor with github username "%s" not found' % username}), 404)
else:
data = cont.as_dict()
data['base_layers'] = [b.name for b in cont.base_layers]
data['condiments'] = [c.name for c in cont.condiments]
data['mixins'] = [m.name for m in cont.mixins]
data['shells'] = [s.name for s in cont.shells]
data['seasonings'] = [s.name for s in cont.seasonings]
resp = make_response(json.dumps(data))
resp.headers['Content-Type'] = 'application/json'
return resp
@app.route('/contributors/<layer_type>/')
@crossdomain(origin="*")
def layer_slugs(layer_type):
try:
model = MAPPER[layer_type]
except KeyError:
resp = make_response(json.dumps({'error': 'Invalid layer type: %s' % layer_type}), 404)
resp.headers['Content-Type'] = 'application/json'
return resp
slugs = [{'name': l.name, 'slug': l.slug} for l in model.query.all()]
resp = make_response(json.dumps(slugs))
resp.headers['Content-Type'] = 'application/json'
return resp
@app.route('/contributors/<recipe_type>/<recipe_slug>/')
@crossdomain(origin="*")
def contributors(recipe_type, recipe_slug):
model = MAPPER[recipe_type]
recipe = model.query.filter_by(slug=recipe_slug).first()
contributors = [c.as_dict() for c in recipe.contributors.all()]
resp = make_response(json.dumps(contributors))
resp.headers['Content-Type'] = 'application/json'
return resp
@app.route('/cook/', methods=['GET', 'POST'])
def cook():
db.create_all()
preheat()
return make_response('did it')
if __name__ == "__main__":
app.run(debug=True)
``` |
{
"source": "joshnroy/baselines",
"score": 3
} |
#### File: baselines/common/layers.py
```python
import tensorflow as tf
def dense(num_in, num_out, name, in_tensor):
weights = tf.Variable(tf.truncated_normal([num_in, num_out], stddev=0.03), name=name+'_W')
bias = tf.Variable(tf.truncated_normal([num_out], stddev=0.01), name=name+'_B')
out_tensor = tf.matmul(in_tensor, weights) + bias
return out_tensor
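# Illustrative usage (not part of the original file; the shapes and names below are
# assumptions for the sketch): a single 784 -> 10 linear layer on a TF1-style input.
# x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
# logits = dense(784, 10, 'logits', x)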
``` |
{
"source": "joshnroy/mujoco_jaco",
"score": 2
} |
#### File: joshnroy/mujoco_jaco/darla_dqn.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import six
import cv2
from jaco_arm import JacoEnv
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Flatten, Conv2D, BatchNormalization, MaxPooling2D, Reshape, Permute, Activation, Conv3D, Lambda, Input, Concatenate
from keras.optimizers import Adam, RMSprop
from keras.initializers import RandomUniform
import keras.backend as K
from keras.callbacks import ModelCheckpoint
from keras.utils import multi_gpu_model
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy, LinearAnnealedPolicy, EpsGreedyQPolicy
from rl.memory import SequentialMemory
from variational_autoencoder_deconv import vae
WEIGHTS_FILE = "darla_beta_vae.h5"
HIDDEN_SIZE = 256
NUM_HIDDEN_LAYERS = 5
WINDOW_LENGTH = 4
MULTI_GPU = False
def run():
"""Construct and start the environment."""
env = JacoEnv(64,
64,
100,
0.1,
0.8,
True)
nb_actions = env.real_num_actions
new_floor_color = list((0.55 - 0.45) * np.random.random(3) + 0.45) + [1.]
new_cube_color = list(np.random.random(3)) + [1.]
env.change_floor_color(new_floor_color)
env.change_cube_color(new_cube_color)
global vae
vae.load_weights(WEIGHTS_FILE)
print("#########################")
nb_observation_space = (64, 64, 3)
original_input = Input(shape=(WINDOW_LENGTH,) + nb_observation_space)
    # bind i as a default argument so each Lambda keeps its own frame index if the lambdas are ever re-evaluated
    in_layer = [Lambda(lambda x, i=i: x[:, i, :, :])(original_input) for i in range(WINDOW_LENGTH)]
vae = Model(vae.inputs, [vae.layers[-2].outputs[2]])
for layer in vae.layers:
layer.trainable = False
print(vae.summary())
vae_output = [vae(x) for x in in_layer]
x = Concatenate()(vae_output)
x = Dense(512, activation='relu')(x)
x = Dense(512, activation='relu')(x)
x = Dense(nb_actions, activation='linear')(x)
model = Model(original_input, [x])
print(model.summary())
if MULTI_GPU:
model = multi_gpu_model(model, gpus=2)
print(model.summary())
num_warmup = 50000
num_simulated_annealing = 500000 + num_warmup
memory = SequentialMemory(limit=1000000, window_length=WINDOW_LENGTH)
policy = LinearAnnealedPolicy(EpsGreedyQPolicy(), attr='eps', value_max=1., value_min=.1, value_test=.05, nb_steps=num_simulated_annealing)
dqn = DQNAgent(model=model, nb_actions=nb_actions, policy=policy, memory=memory, nb_steps_warmup=num_warmup, gamma=.99, target_model_update=10000, train_interval=4, delta_clip=1.)
dqn.compile(Adam(lr=.00025), metrics=['mae'])
if False:
checkpoint_callback = ModelCheckpoint("darla_dqn_checkpoint", monitor='episode_reward', verbose=0, save_best_only=True, save_weights_only=True, mode='max', period = 10)
history = dqn.fit(env, nb_steps=num_simulated_annealing + 450000,
visualize=False, verbose=1, callbacks = [checkpoint_callback])
dqn.save_weights("darla_dqn_weights")
np.savez_compressed("darla_dqn_history",
episode_reward=np.asarray(history.history['episode_reward']))
else:
dqn.load_weights("darla_dqn_weights")
print("original domain")
source_test_losses = dqn.test(env, nb_episodes=100, visualize=True)
np.savez_compressed("darla_dqn_source_test",
episode_reward=np.asarray(source_test_losses.history['episode_reward']),
nb_steps=np.asarray(source_test_losses.history['nb_steps']))
print("target domain")
new_floor_color = [0.4, 0.6, 0.4, 1.]
new_cube_color = [1.0, 0.0, 0.0, 1.]
env.change_floor_color(new_floor_color)
env.change_cube_color(new_cube_color)
target_test_losses = dqn.test(env, nb_episodes=100, visualize=True)
np.savez_compressed("darla_dqn_target_test",
episode_reward=np.asarray(target_test_losses.history['episode_reward']),
nb_steps=np.asarray(target_test_losses.history['nb_steps']))
source_array = np.asarray(source_test_losses.history['episode_reward'])
target_array = np.asarray(target_test_losses.history['episode_reward'])
print(source_array.min(), source_array.mean(), source_array.max())
print(target_array.min(), target_array.mean(), target_array.max())
if __name__ == '__main__':
run()
``` |
{
"source": "joshnroy/SimSiam",
"score": 2
} |
#### File: joshnroy/SimSiam/alignment_and_uniformity.py
```python
import os
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from tqdm import tqdm, trange
from arguments import get_args
from augmentations import get_aug
from models import get_model, get_backbone
from tools import AverageMeter
from datasets import get_dataset
from optimizers import get_optimizer, LR_Scheduler
from sklearn.manifold import TSNE
import plotly.express as px
import io
from PIL import Image
import imageio
import numpy as np
import cv2
import random
#%%
args=get_args(inputs=["--config_file=configs/simsiam_stream51.yaml", "--data_dir=../stream_data", "--log_dir=../alignment_logs", "--ckpt_dir=.cache/jitter0", "--preload_dataset", "--bbox_crop", "--eval_from=alignment_models/stream51-cifar_time_jittering_deterministic0.pth"])
# %%
torch.manual_seed(0)
random.seed(0)
np.random.seed(0)
args.eval.num_epochs = 10
args.dataset_kwargs['ordering'] = 'instance'
train_loader = torch.utils.data.DataLoader(
dataset=get_dataset(
transform=get_aug(train=True, train_classifier=False, **args.aug_kwargs),
train=True,
**args.dataset_kwargs
),
batch_size=args.eval.batch_size,
shuffle=True,
**args.dataloader_kwargs
)
test_loader = torch.utils.data.DataLoader(
dataset=get_dataset(
transform=get_aug(train=False, train_classifier=False, **args.aug_kwargs),
train=False,
**args.dataset_kwargs
),
batch_size=args.eval.batch_size,
shuffle=True,
**args.dataloader_kwargs
)
# %%
if args.eval_from is not None:
print("Loading model from", args.eval_from)
model = get_backbone(args.model.backbone)
save_dict = torch.load(args.eval_from, map_location='cpu')
msg = model.load_state_dict({k[9:]:v for k, v in save_dict['state_dict'].items() if k.startswith('backbone.')}, strict=True)
print(msg)
model.to(args.device)
else:
print("Specify an eval_from")
# %%
with torch.no_grad():
all_train_features = []
train_outputs = {}
for x in tqdm(train_loader):
images = x[0]
labels = x[-1]
feature = model(images.to(args.device, non_blocking=True))
for i_label in range(len(labels)):
l = labels[i_label].item()
f = feature[i_label].detach().cpu()
if l not in train_outputs:
train_outputs[l] = []
train_outputs[l].append(f)
all_train_features.append(f)
all_train_features = torch.stack(all_train_features)
for l in train_outputs:
train_outputs[l] = (torch.stack(train_outputs[l]) - all_train_features.mean()) / all_train_features.std()
all_train_features = (all_train_features - all_train_features.mean()) / all_train_features.std()
# %%
with torch.no_grad():
all_test_features = []
test_outputs = {}
for x in tqdm(test_loader):
images = x[0]
labels = x[-1]
feature = model(images.to(args.device, non_blocking=True))
for i_label in range(len(labels)):
l = labels[i_label].item()
f = feature[i_label].detach().cpu()
if l not in test_outputs:
test_outputs[l] = []
test_outputs[l].append(f)
all_test_features.append(f)
all_test_features = torch.stack(all_test_features)
for l in test_outputs:
test_outputs[l] = (torch.stack(test_outputs[l]) - all_test_features.mean()) / all_test_features.std()
all_test_features = (all_test_features - all_test_features.mean()) / all_test_features.std()
# %%
def align_loss(x, y, device, alpha=2, batch_size=512):
align_loss = 0.
for i in range(0, len(x), batch_size):
x_batch = x[i:i+batch_size].to(device)
y_batch = y[i:i+batch_size].to(device)
align_loss += (x_batch - y_batch).norm(p=2, dim=1).pow(alpha).mean().item()
return align_loss
def uniform_loss(x, device, t=2, batch_size=512):
uniform_loss = 0.
n = 0
for i in range(0, len(x), batch_size):
x_batch = x[i:i+batch_size].to(device)
uniform_loss = (n / (n + 1.)) * uniform_loss + (1 / (n + 1.)) * torch.pdist(x_batch, p=2).pow(2).mul(-t).exp().mean().log().item()
n += 1.
print(uniform_loss)
return uniform_loss
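# Clarifying note on the two metrics above: align_loss accumulates, batch by batch,
# the mean alpha-powered L2 distance between paired rows of x and y (lower means
# matched embeddings sit closer together); uniform_loss keeps a running batch average
# of log(mean(exp(-t * squared pairwise distance))) (lower means the embeddings spread
# more uniformly). A hypothetical paired call (features_view1/features_view2 are
# placeholder names, not variables defined in this script) would look like:
# a = align_loss(features_view1, features_view2, args.device)
# u = uniform_loss(all_train_features, args.device)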
# %%
with torch.no_grad():
train_uniform_metric = uniform_loss(all_train_features, args.device)
test_uniform_metric = uniform_loss(all_test_features, args.device)
train_uniform_metric
test_uniform_metric
# %%
```
#### File: SimSiam/augmentations/__init__.py
```python
from .simsiam_aug import SimSiamTransform
from .eval_aug import Transform_single
from .byol_aug import BYOL_transform
from .simclr_aug import SimCLRTransform
def get_aug(name='simsiam', image_size=224, train=True, train_classifier=None, double_images=False, single_aug=None, mean_std=[[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]]):
if train==True:
if name == 'simsiam':
augmentation = SimSiamTransform(image_size, double_images, single_aug, mean_std=mean_std)
elif name == 'byol':
augmentation = BYOL_transform(image_size, single_aug=single_aug, mean_std=mean_std)
elif name == 'simclr':
augmentation = SimCLRTransform(image_size)
elif name == 'barlow':
augmentation = BYOL_transform(image_size, single_aug=single_aug, mean_std=mean_std)
else:
raise NotImplementedError
elif train==False:
if train_classifier is None:
raise Exception
augmentation = Transform_single(image_size, train=train_classifier)
else:
raise Exception
return augmentation
```
#### File: SimSiam/datasets/__init__.py
```python
import torch
import torchvision
from .random_dataset import RandomDataset
from StreamDataset import StreamDataset
from UCFDataset import UCFDataset, UCFImageDataset
def get_dataset(dataset, data_dir, transform, train=True, download=False, debug_subset_size=None, ordering='iid', small_dataset=False, temporal_jitter_range=0, preload=False, bbox_crop=False):
if dataset == 'mnist':
dataset = torchvision.datasets.MNIST(data_dir, train=train, transform=transform, download=download)
elif dataset == 'stl10':
dataset = torchvision.datasets.STL10(data_dir, split='train+unlabeled' if train else 'test', transform=transform, download=download)
elif dataset == 'cifar10':
dataset = torchvision.datasets.CIFAR10(data_dir, train=train, transform=transform, download=download)
elif dataset == 'cifar100':
dataset = torchvision.datasets.CIFAR100(data_dir, train=train, transform=transform, download=download)
elif dataset == 'imagenet':
dataset = torchvision.datasets.ImageNet(data_dir, split='train' if train == True else 'val', transform=transform, download=download)
elif dataset == 'random':
dataset = RandomDataset()
elif dataset == 'stream51':
dataset = StreamDataset(data_dir, train=train, ordering=ordering, transform=transform, small_dataset=small_dataset, temporal_jitter_range=temporal_jitter_range, preload=preload, bbox_crop=bbox_crop)
elif dataset == 'ucf101':
dataset = UCFImageDataset(data_dir, train, transform, small_dataset=small_dataset, preload_dataset=preload)
elif dataset == 'ucf101_vid':
dataset = UCFDataset(data_dir, train, transform)
else:
raise NotImplementedError
if debug_subset_size is not None:
dataset = torch.utils.data.Subset(dataset, range(0, debug_subset_size)) # take only one batch
dataset.classes = dataset.dataset.classes
dataset.targets = dataset.dataset.targets
return dataset
```
#### File: joshnroy/SimSiam/main.py
```python
import os
import shutil
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import numpy as np
from tqdm import tqdm
from arguments import get_args
from augmentations import get_aug
from models import get_model
from tools import AverageMeter, knn_monitor, Logger, file_exist_check, get_feature_var
from datasets import get_dataset
from optimizers import get_optimizer, LR_Scheduler
from linear_eval import main as linear_eval
from datetime import datetime
import sys
import wandb
import pandas as pd
import cv2
import imageio
from copy import deepcopy
def save_images(imgs, labels, name, fps=2):
print("WRITING SAMPLE IMAGES")
path_name = os.path.join("..", "images_" + name)
if os.path.exists(path_name):
shutil.rmtree(path_name)
os.makedirs(path_name)
assert len(imgs) == len(labels)
images = []
for i_save in range(len(imgs)):
sample = imgs[i_save].numpy()
# Convert to 0-1, (W, H, C)
sample -= sample.min()
sample /= sample.max()
sample = (sample.transpose((1, 2, 0)) * 255)
sample = cv2.resize(
sample, (8 * sample.shape[1], 8 * sample.shape[0]), interpolation=cv2.INTER_CUBIC)
# Save image
images.append(sample)
imageio.mimwrite(os.path.join(path_name, "movie.gif"), images, fps=fps)
def main(device, args):
cifar_args = deepcopy(args)
cifar_args.eval.num_classes = 10
cifar_args.dataset_kwargs['dataset'] = 'cifar10'
if args.no_augmentation:
print("NO AUGMENTATION IID", flush=True)
train_loader = torch.utils.data.DataLoader(
dataset=get_dataset(
transform=get_aug(
train=False, train_classifier=True, **args.aug_kwargs),
train=True,
**args.dataset_kwargs),
shuffle=True,
batch_size=args.train.batch_size,
**args.dataloader_kwargs
)
else:
train_loader = torch.utils.data.DataLoader(
dataset=get_dataset(
transform=get_aug(train=True, **args.aug_kwargs),
train=True,
**args.dataset_kwargs),
shuffle=True,
batch_size=args.train.batch_size,
**args.dataloader_kwargs
)
memory_loader = torch.utils.data.DataLoader(
dataset=get_dataset(
transform=get_aug(train=True, **args.aug_kwargs),
train=True,
**args.dataset_kwargs),
shuffle=True,
batch_size=args.train.batch_size,
**args.dataloader_kwargs
)
test_loader = torch.utils.data.DataLoader(
dataset=get_dataset(
transform=get_aug(
train=False, train_classifier=False, **args.aug_kwargs),
train=False,
**args.dataset_kwargs),
shuffle=False,
batch_size=args.train.batch_size,
**args.dataloader_kwargs
)
cifar_dataset_kwargs = cifar_args.dataloader_kwargs
cifar_memory_loader = torch.utils.data.DataLoader(
dataset=get_dataset(
transform=get_aug(
train=False, train_classifier=True, **args.aug_kwargs),
train=True,
**cifar_args.dataset_kwargs),
shuffle=True,
batch_size=args.train.batch_size,
**cifar_args.dataloader_kwargs
)
cifar_test_loader = torch.utils.data.DataLoader(
dataset=get_dataset(
transform=get_aug(
train=False, train_classifier=False, **args.aug_kwargs),
train=False,
**cifar_args.dataset_kwargs),
shuffle=False,
batch_size=args.train.batch_size,
**cifar_args.dataloader_kwargs
)
# define model
model = get_model(args.model).to(device)
model = torch.nn.DataParallel(model)
# if args.wandb:
# wandb.watch(model)
# define optimizer
optimizer = get_optimizer(
args.train.optimizer.name, model,
lr=args.train.base_lr*args.train.batch_size/256,
momentum=args.train.optimizer.momentum,
weight_decay=args.train.optimizer.weight_decay)
lr_scheduler = LR_Scheduler(
optimizer,
args.train.warmup_epochs, args.train.warmup_lr*args.train.batch_size/256,
args.train.num_epochs, args.train.base_lr*args.train.batch_size /
256, args.train.final_lr*args.train.batch_size/256,
len(train_loader),
constant_predictor_lr=True # see the end of section 4.2 predictor
)
logger = Logger(tensorboard=args.logger.tensorboard,
matplotlib=args.logger.matplotlib, log_dir=args.log_dir)
accuracy = 0
# Start training
if args.train.knn_monitor:
train_accuracy, train_features = knn_monitor(model.module.backbone, memory_loader, memory_loader, device, k=min(
args.train.knn_k, len(memory_loader.dataset)), hide_progress=args.hide_progress)
test_accuracy, test_features = knn_monitor(model.module.backbone, memory_loader, test_loader, device, k=min(
args.train.knn_k, len(memory_loader.dataset)), hide_progress=args.hide_progress)
print("before training (train, test) accuracy",
train_accuracy, test_accuracy)
train_accuracy = 0.
test_accuracy = 0.
train_var = get_feature_var(model.module.backbone, memory_loader)
test_var = get_feature_var(model.module.backbone, test_loader)
if args.model.name == 'byol':
global_step = 0
max_steps = args.train.stop_at_epoch * len(train_loader)
global_progress = tqdm(
range(0, args.train.stop_at_epoch), desc=f'Training')
for epoch in global_progress:
model.train()
batch_loss = 0.
batch_updates = 0
batch_var = 0.
batch_mean = 0.
local_progress = tqdm(
train_loader, desc=f'Epoch {epoch}/{args.train.num_epochs}', disable=args.hide_progress)
for idx, data in enumerate(local_progress):
assert len(data) in [3, 2]
if len(data) == 3:
images1, images2, labels = data
if type(images1) == list and len(images1) == 2 and type(images2) == list and len(images2) == 2:
images1 = images1[0]
images2 = images2[1]
else: # len(data) == 2
images1, images2 = data[0]
labels = data[1]
if args.save_sample:
save_images(torch.cat((images1, images2), 3), labels, "iid")
return
model.zero_grad()
data_dict = model.forward(images1.to(
device, non_blocking=True), images2.to(device, non_blocking=True))
loss = data_dict['loss'].mean() # ddp
data_dict['loss'] = loss
loss.backward()
optimizer.step()
lr_scheduler.step()
data_dict.update({'lr': lr_scheduler.get_lr()})
batch_mean += (data_dict['feature_mean'] - batch_mean) / (batch_updates+1)
batch_var += (data_dict['feature_var'] - batch_mean) * (data_dict['feature_var'] - data_dict['feature_mean'])
data_dict['feature_mean'] = data_dict['feature_mean'].mean()
data_dict['feature_var'] = data_dict['feature_var'].mean()
if args.model.name == 'byol':
# model.module.update_moving_average(global_step, max_steps)
global_step += 1
local_progress.set_postfix(data_dict)
logger.update_scalers(data_dict)
batch_loss += loss.item()
batch_updates += 1
if args.train.knn_monitor and epoch % args.train.knn_interval == 0:
train_accuracy, train_features = knn_monitor(model.module.backbone, memory_loader, memory_loader, device, k=min(
args.train.knn_k, len(memory_loader.dataset)), hide_progress=args.hide_progress)
test_accuracy, test_features = knn_monitor(model.module.backbone, memory_loader, test_loader, device, k=min(
args.train.knn_k, len(memory_loader.dataset)), hide_progress=args.hide_progress)
train_var = torch.var(train_features, dim=0).mean().item()
test_var = torch.var(test_features, dim=0).mean().item()
if epoch % args.train.knn_interval == 0:
train_var = get_feature_var(model.module.backbone, memory_loader)
test_var = get_feature_var(model.module.backbone, test_loader)
model_path = os.path.join(
args.ckpt_dir, f"{args.name}_{datetime.now().strftime('%m%d%H%M%S')}.pth")
torch.save({
'epoch': epoch+1,
'state_dict': model.module.state_dict()
}, model_path)
print(f"Model saved to {model_path}")
with open(os.path.join(args.log_dir, f"checkpoint_path.txt"), 'w+') as f:
f.write(f'{model_path}')
epoch_dict = {"Epoch": epoch, "Loss": batch_loss / batch_updates, "Train Feature Variance": train_var, "Test Feature Variance": test_var}
if args.wandb:
wandb.log(epoch_dict)
global_progress.set_postfix(epoch_dict)
logger.update_scalers(epoch_dict)
train_accuracy, test_accuracy, train_features, test_features = linear_eval(
args, train_loader=memory_loader, test_loader=test_loader, model=model.module.backbone)
cifar_train_accuracy, cifar_test_accuracy, cifar_train_features, cifar_test_features = linear_eval(
cifar_args, train_loader=cifar_memory_loader, test_loader=cifar_test_loader, model=model.module.backbone)
epoch_dict = {"Train Accuracy": train_accuracy, "Test Accuracy": test_accuracy, "Cifar Train Accuracy": cifar_train_accuracy, "Cifar Test Accuracy": cifar_test_accuracy, "Train Feature Standard Deviation": torch.std(train_features, dim=0).mean().item(), "Test Feature Standard Deviation": torch.std(test_features, dim=0).mean().item()}
print("FINAL OUTPUTS", flush=True)
print(epoch_dict, flush=True)
if args.wandb:
wandb.log(epoch_dict)
# Save checkpoint
# datetime.now().strftime('%Y%m%d_%H%M%S')
model_path = os.path.join(
args.ckpt_dir, f"{args.name}_{datetime.now().strftime('%m%d%H%M%S')}.pth")
torch.save({
'epoch': epoch+1,
'state_dict': model.module.state_dict()
}, model_path)
print(f"Model saved to {model_path}")
with open(os.path.join(args.log_dir, f"checkpoint_path.txt"), 'w+') as f:
f.write(f'{model_path}')
if __name__ == "__main__":
args = get_args()
if args.wandb:
wandb_config = pd.json_normalize(vars(args), sep='_')
wandb_config = wandb_config.to_dict(orient='records')[0]
wandb.init(project='simsiam', config=wandb_config, group=args.wandb_group)
print("Using device", args.device)
main(device=args.device, args=args)
completed_log_dir = args.log_dir.replace(
'in-progress', 'debug' if args.debug else 'completed')
os.rename(args.log_dir, completed_log_dir)
print(f'Log file has been saved to {completed_log_dir}')
``` |
{
"source": "Josh-Null-A/brainpy",
"score": 3
} |
#### File: brainpy/brainpy/lexer.py
```python
class Lexer:
KNOWN_TOKENS = {
'>' : 'POINTER',
'<' : 'POINTER',
'+' : 'OP',
'-' : 'OP',
'.' : 'OI',
',' : 'OI',
'[' : 'LOOP',
']' : 'LOOP'
}
    @staticmethod
    def find_tokens(path):
        tokens = []
        # Use a context manager so the file is always closed
        with open(path) as source:
            # Go through each line
            for line in source:
                # Go through each char
                for char in line:
                    # Detect a true comment.
                    # A true comment is different from normal comments.
                    # A true comment will skip an entire line,
                    # whereas a normal comment might still have a token in it later on
                    # EXAMPLE: >+ comment: this + 2 numbers
                    # brain.py would still detect that second + unless there was a #
                    if char == '#':
                        break
                    # Every char that isn't part of KNOWN_TOKENS is considered a comment.
                    if char in Lexer.KNOWN_TOKENS:
                        tokens.append([char, Lexer.KNOWN_TOKENS[char]])
        return tokens
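# Usage illustration (the file name is hypothetical): for a file "prog.bf" containing
# ">+ comment with a stray +", Lexer.find_tokens("prog.bf") returns
# [['>', 'POINTER'], ['+', 'OP'], ['+', 'OP']] -- only a '#' (a "true comment")
# stops scanning the rest of a line.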
``` |
{
"source": "joshnunley/voting-optimization",
"score": 4
} |
#### File: voting-optimization/src/VoteModel.py
```python
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
class VoteModel:
"""
Summary:
Optimization algorithm using voting methods
Required Parameters:
nk_model is an instance of the NKLandscape class (though, as an optimization method,
this parameter could take any class instance which has binary array solutions of fixed size N and
a calculate_fitness method)
solutions is a 2d binary numpy array of shape (num_solutions, N) where N is the size of solutions
to the provided nk model
Optional Parameters:
possible_vote_indices is the region of the solution that is allowed to be modified.
If this parameter is not provided, the entire solution is allowed to be modified.
vote_size determines the number of bit positions to vote on in each voting round
vote_type determines the voting model used to decide which proposal to implement
To do:
-Implement Plurality with Runoff
-Implement Borda Rule
-Implement Hare Rule
-Implement Coombs Rule
"""
def __init__(
self,
nk_model=None,
solutions=None,
possible_vote_indices=None,
vote_size=2,
vote_type="plurality",
):
if nk_model is None:
raise ValueError("An NKLandscape model must be provided")
self.nk = nk_model
if solutions is None:
raise ValueError("A set of voting solutions must be provided")
self.solutions = np.copy(solutions)
if possible_vote_indices is None:
self.possible_vote_indices = np.arange(self.nk.N)
else:
self.possible_vote_indices = possible_vote_indices
self.num_solutions = solutions.shape[0]
self.vote_size = vote_size
self.vote_type = vote_type
self.mean_list = np.zeros(shape=(50, 2))
def _calculate_proposal_fitnesses(self, proposal_indices):
proposal_fitnesses = np.zeros((self.num_solutions, 2 ** self.vote_size))
for i, solution in enumerate(self.solutions):
for proposal in range(2 ** self.vote_size):
binary_proposal = self._decimal_to_binary(proposal, self.vote_size)
proposed_solution = np.copy(solution)
np.put(proposed_solution, proposal_indices, binary_proposal)
proposal_fitnesses[i, proposal] = self.nk.calculate_fitness(
proposed_solution
)
return proposal_fitnesses
def _decimal_to_binary(self, decimal, length):
binary = list(map(int, bin(decimal)[2:]))
pad = length - len(binary)
binary = [0] * pad + binary
return np.asarray(binary, int)
def _binary_to_decimal(self, binary):
decimal = 0
for i in range(len(binary)):
decimal += binary[len(binary) - i - 1] * 2 ** i
return decimal
def _determine_winner(self, proposal_fitnesses):
if self.vote_type == "plurality":
tally = np.zeros(2 ** self.vote_size)
for i in range(self.num_solutions):
vote = np.argwhere(
proposal_fitnesses[i] == np.max(proposal_fitnesses[i])
)[0]
tally[vote[0]] += 1
decimal_winner = np.argwhere(tally == np.max(tally))[0][0]
binary_winner = self._decimal_to_binary(decimal_winner, self.vote_size)
elif self.vote_type == "approval":
tally = np.zeros(2 ** self.vote_size)
current_fitnesses = self.get_fitnesses()
for i in range(self.num_solutions):
votes = np.argwhere(proposal_fitnesses[i] > current_fitnesses[i]).T
if len(votes[0]) > 0:
tally[votes[0]] += 1
else:
self_vote = np.argwhere(
proposal_fitnesses[i] == current_fitnesses[i]
).T
tally[self_vote[0]] += 1
decimal_winner = np.argwhere(tally == np.max(tally))[0][0]
binary_winner = self._decimal_to_binary(decimal_winner, self.vote_size)
elif self.vote_type == "ranked": # This is where new code started
current_fitnesses = self.get_fitnesses()[np.newaxis].T
marginal_scores = proposal_fitnesses - np.tile(
current_fitnesses, 2 ** self.vote_size
)
scores = np.sort(marginal_scores, axis=1)
# Implement Borda Rule here
decimal_winner = np.argwhere(scores == np.max(scores))[0][0]
binary_winner = self._decimal_to_binary(decimal_winner, self.vote_size)
        # A version of score that is most similar to actual score voting (actually more like cumulative voting).
        # Each solution has the same amount of score (1) to allot to the proposals
elif self.vote_type == "normalized_score":
minimum_proposal_fitnesses = np.min(proposal_fitnesses, axis=1)[
np.newaxis
].T
minimum_proposal_fitnesses = np.tile(
minimum_proposal_fitnesses, 2 ** self.vote_size
)
positive_proposal_fitnesses = (
proposal_fitnesses - minimum_proposal_fitnesses
)
normalizing_factors = np.sum(positive_proposal_fitnesses, axis=1)[
np.newaxis
].T
normalizing_factors = np.tile(normalizing_factors, 2 ** self.vote_size)
normalized_scores = np.divide(
positive_proposal_fitnesses, normalizing_factors
)
scores = np.sum(normalized_scores, axis=0)
decimal_winner = np.argwhere(scores == np.max(scores))[0][0]
binary_winner = self._decimal_to_binary(decimal_winner, self.vote_size)
# These assume voters know how their utility differs from another's (in the case of nk
# landscapes this is obviously true)
elif self.vote_type == "total_score":
scores = np.sum(proposal_fitnesses, axis=0)
decimal_winner = np.argwhere(scores == np.max(scores))[0][0]
binary_winner = self._decimal_to_binary(decimal_winner, self.vote_size)
# I think this should be the optimal setting for optimization
elif self.vote_type == "marginal_score":
current_fitnesses = self.get_fitnesses()[np.newaxis].T
marginal_scores = proposal_fitnesses - np.tile(
current_fitnesses, 2 ** self.vote_size
)
scores = np.sum(marginal_scores, axis=0)
decimal_winner = np.argwhere(scores == np.max(scores))[0][0]
binary_winner = self._decimal_to_binary(decimal_winner, self.vote_size)
return binary_winner
def _update_solutions(self, winner, proposal_indices):
for i in range(self.num_solutions):
np.put(self.solutions[i], proposal_indices, winner)
self.solutions = np.unique(self.solutions, axis=0)
self.num_solutions = self.solutions.shape[0]
def _generate_vote_indicies(self):
np.random.shuffle(self.possible_vote_indices)
return self.possible_vote_indices[0 : self.vote_size]
def print_distribution(self, i, verbose):
if verbose:
fitnesses = self.get_fitnesses()
print(
"\n vote iterations:",
i + 1,
"num_solutions:",
self.get_num_solutions(),
"\n mean:",
np.mean(fitnesses),
"variance:",
np.var(fitnesses),
"\n min:",
np.min(fitnesses),
"max:",
np.max(fitnesses),
)
sns.distplot(fitnesses)
plt.show()
def get_mean(self):
fitnesses = self.get_fitnesses()
return np.mean(fitnesses)
def get_variance(self):
fitnesses = self.get_fitnesses()
return np.var(fitnesses)
def get_max(self):
fitness = self.get_fitnesses()
return np.max(fitness)
def get_min(self):
fitness = self.get_fitnesses()
return np.min(fitness)
    def run(self, iterations=100, until_unique=False, verbose=False):
        # size the mean history to the requested number of iterations so the indexing below cannot overrun
        self.mean_list = np.zeros(shape=(iterations, 2))
        for i in range(iterations):
proposal_indices = self._generate_vote_indicies()
proposal_fitnesses = self._calculate_proposal_fitnesses(proposal_indices)
winner = self._determine_winner(proposal_fitnesses)
self._update_solutions(winner, proposal_indices)
self.print_distribution(i, verbose)
self.mean_list[i] = [i, self.get_mean()]
if until_unique and (self.num_solutions == 1):
break
def get_solutions(self):
return self.solutions
def set_solutions(self, solutions):
self.solutions = solutions
self.num_solutions = self.solutions.shape[0]
def get_num_solutions(self):
return self.num_solutions
def get_fitnesses(self):
fitnesses = np.zeros(self.num_solutions)
for i in range(self.num_solutions):
fitnesses[i] = self.nk.calculate_fitness(self.solutions[i])
return fitnesses
def get_nk_model(self):
return self.nk
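# Minimal usage sketch (illustration only). The class docstring above assumes an
# NKLandscape instance exposing N and calculate_fitness(); the constructor arguments
# shown for NKLandscape here are hypothetical.
# nk = NKLandscape(N=20, K=4)
# voters = np.random.randint(0, 2, size=(10, nk.N))
# model = VoteModel(nk_model=nk, solutions=voters, vote_size=2, vote_type="plurality")
# model.run(iterations=50)
# print(model.get_mean(), model.get_max())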
``` |
{
"source": "joshOberhaus/roomba_500_series",
"score": 2
} |
#### File: roomba_500_series/msg/_Buttons.py
```python
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class Buttons(genpy.Message):
_md5sum = "2c6635fea08c0a11307b4518b1f7fd79"
_type = "roomba_500_series/Buttons"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
bool clean
bool spot
bool dock
bool day
bool hour
bool minute
bool schedule
bool clock
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.secs: seconds (stamp_secs) since epoch
# * stamp.nsecs: nanoseconds since stamp_secs
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
"""
__slots__ = ['header','clean','spot','dock','day','hour','minute','schedule','clock']
_slot_types = ['std_msgs/Header','bool','bool','bool','bool','bool','bool','bool','bool']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
        set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,clean,spot,dock,day,hour,minute,schedule,clock
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Buttons, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.clean is None:
self.clean = False
if self.spot is None:
self.spot = False
if self.dock is None:
self.dock = False
if self.day is None:
self.day = False
if self.hour is None:
self.hour = False
if self.minute is None:
self.minute = False
if self.schedule is None:
self.schedule = False
if self.clock is None:
self.clock = False
else:
self.header = std_msgs.msg.Header()
self.clean = False
self.spot = False
self.dock = False
self.day = False
self.hour = False
self.minute = False
self.schedule = False
self.clock = False
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_8B.pack(_x.clean, _x.spot, _x.dock, _x.day, _x.hour, _x.minute, _x.schedule, _x.clock))
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.clean, _x.spot, _x.dock, _x.day, _x.hour, _x.minute, _x.schedule, _x.clock,) = _struct_8B.unpack(str[start:end])
self.clean = bool(self.clean)
self.spot = bool(self.spot)
self.dock = bool(self.dock)
self.day = bool(self.day)
self.hour = bool(self.hour)
self.minute = bool(self.minute)
self.schedule = bool(self.schedule)
self.clock = bool(self.clock)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_8B.pack(_x.clean, _x.spot, _x.dock, _x.day, _x.hour, _x.minute, _x.schedule, _x.clock))
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.clean, _x.spot, _x.dock, _x.day, _x.hour, _x.minute, _x.schedule, _x.clock,) = _struct_8B.unpack(str[start:end])
self.clean = bool(self.clean)
self.spot = bool(self.spot)
self.dock = bool(self.dock)
self.day = bool(self.day)
self.hour = bool(self.hour)
self.minute = bool(self.minute)
self.schedule = bool(self.schedule)
self.clock = bool(self.clock)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_8B = struct.Struct("<8B")
_struct_3I = struct.Struct("<3I")
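# Round-trip sketch (not part of the generated file): keyword construction and
# serialization into an in-memory buffer.
# from io import BytesIO
# msg = Buttons(clean=True, dock=True)
# buff = BytesIO()
# msg.serialize(buff)
# decoded = Buttons().deserialize(buff.getvalue())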
``` |
{
"source": "JoshOrndorff/snippets",
"score": 3
} |
#### File: lib/Cname/Cname.py
```python
from snippets import Snippet
class Cname(Snippet):
'''
Renders a person's 'common name' or cname. If a common name is specified, it
is used, and if not the first name is used.
'''
def __init__(self, subject, ancestorTokens = [], children = {}):
# Call parent class constructor
super().__init__(subject, ancestorTokens, children)
# Data names, in xpath format, that are recognized by this snippet
self.supportedData = ['fname', 'cname']
# Tokens that this snippet will replace directly
self.tokens = []
#TODO this should be a class method or static method or something because it
# will be called before construction presumably.
def is_compatible(cls, subject):
''' Returns boolean whether the subject is compatible with this snippet. '''
#TODO implement this
return True
def generate_text(self):
if self.subject['cname'] is None:
self.text = self.subject['fname']
else:
self.text = self.subject['cname']
```
#### File: lib/Cocurricular/Cocurricular.py
```python
from os.path import dirname
from os import sep
from random import choice
from snippets.core.shared import get_comments
from snippets import Snippet
class Cocurricular(Snippet):
def __init__(self, subject, ancestorTokens = [], children = {}):
# Tokens are replaced in the order specified here.
#TODO This list feels redundant since I've defined functions / children for all tokens.
self.ownTokens = ['fname', 'lname', 'grade', 'course', 'effort', 'strength', 'weakness', 'prestopic', 'nextbot']
super().__init__(subject, ancestorTokens, children)
commentFile = dirname(__file__) + sep + "CocurricularComments.csv"
self.CL = get_comments(commentFile)
self.supportedData = ['course',
'strength',
'weakness',
'grade',
'effort',
'fname',
'lname']
def token_fname(self):
return self.subject['fname']
def token_lname(self):
return self.subject['lname']
def token_course(self):
return self.subject['course']
def token_grade(self):
return self.subject['overall']
def token_effort(self):
return self.subject['effort']
def token_strength(self):
return self.subject['strength']
def token_weakness(self):
return self.subject['weakness']
def token_prestopic(self):
presOffsets = {'motor': 14, 'dispbutt': 18, 'touch': 22, 'speakerstatus': 26, 'gyro': 30, 'color': 34, 'wires': 38}
if self.subject['prestopic'] in presOffsets:
presOffset = presOffsets[self.subject['prestopic']]
return choice(self.CL[presOffset - int(self.subject['presentation'])])
elif self.subject['prestopic'] == "":
return "" # Optional argument intentionally left blank
else:
return "!!!!!Unsupported prestopic; correct or manually write!!!!!"
def token_nextbot(self):
botOffsets = {'gyro': 39, 'puppy': 40, 'arm': 41, 'color': 42, 'custom': 43}
if self.subject['nextbot'] in botOffsets:
botOffset = botOffsets[self.subject['nextbot']]
return choice(self.CL[botOffset])
elif self.subject['nextbot'] == "":
return "" # Optional argument intentionally left blank
else:
return "!!!!!Unsupported nextbot; correct or manually write!!!!!"
def is_compatible(cls, subject):
#TODO Implement This
pass
def generate_text(self):
# Open with basic information
self.text = "/fname /lname: /grade /effort\n"
# Opening comment based on overall grade
if self.subject['overall'] == 'EP':
self.text += choice(self.CL[0])
elif self.subject['overall'] == 'HP':
self.text += choice(self.CL[1])
elif self.subject['overall'] == 'P':
self.text += choice(self.CL[2])
elif self.subject['overall'] == 'LP':
self.text += choice(self.CL[3])
else:
self.text += choice(self.CL[3])
self.text += "!!!STUDENT DID NOT PASS. MANUALLY EDIT COMMENT!!!"
# Second comment if student struggled in a particular area
if self.subject['weakness'] != '':
self.text += choice(self.CL[4])
# Third comment if student excelled in a particular area
if self.subject['strength'] != '':
self.text += choice(self.CL[5])
# Participation
self.text += choice(self.CL[9 - int(self.subject['participation'])])
# Presentation. Comments are specific to topic and student's success.
if 'prestopic' in self.subject:
self.text += "/prestopic"
#TODO Obstacle course
# In the student spreadsheet I could enter a space separated list of what
# each student got points for in the obstacle course. Then just mention them
# in the narrative.
# for item in self.subject['obstaclecourse'].split():
# Other Robot. Comments are specific to robot
if 'nextbot' in self.subject:
self.text += "/nextbot"
# Conclusion
self.text += choice(self.CL[9])
```
#### File: ctyEnge/ContentProficiency/ContentProficiency.py
```python
from snippets import Snippet
from os.path import dirname
from os import sep
from random import choice
from snippets.core.shared import get_comments
class ContentProficiency(Snippet):
def __init__(self, subject, ancestorTokens = [], children = {}):
self.ownTokens = ['opening', 'progression', 'filler', 'cname']
#TODO there is nicer syntax for python3
super(ContentProficiency, self).__init__(subject, ancestorTokens, children)
commentFile = dirname(__file__) + sep + "comments.csv"
self.CL = get_comments(commentFile)
self.supportedData = ['cname', 'notes[@"content proficiency"]']
def token_opening(self):
return choice(self.CL[0])
def token_progression(self):
# Assumes number of problems presented is correlated with problem success
problemsPresented = len(self.subject.find('homework'))
if problemsPresented < 2:
return choice(self.CL[3])
else:
return choice(self.CL[2])
def token_filler(self):
# Only insert filler material if there aren't many student-specific notes.
notes = self.subject.find('notes[@category="content proficiency"]')
if len(notes) < 3:
return choice(self.CL[5])
return ""
def token_cname(self):
if self.subject['cname'] is None:
return self.subject['fname']
return self.subject['cname']
def is_compatible(cls, subject):
#TODO Implement This
pass
def generate_text(self):
self.text = "/opening "
self.text += "/progression "
self.text += "For example ... [Homework or Design Challenges] [Anything else from notes?]"
self.text += "/filler" # In case there isn't much specific info about the kid.
```
#### File: lib/LetterGrade/LetterGrade.py
```python
from snippets import Snippet
class LetterGrade(Snippet):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.supportedData = ['grade']
self.tokens = []
#TODO this should be a class method or static method or something because it
# will be called before construction presumably.
def is_compatible(cls, subject):
''' Returns boolean whether the subject is compatible with this snippet. '''
try:
subject['grade']
except KeyError:
return False
else:
return True
#TODO support an optional way to override the default cutoffs via a subject datum.
def generate_text(self):
grade = float(self.subject['grade'])
if grade > 100 or grade < 0:
raise ValueError("Grade must be between 0 and 100")
cutoffs = [ 97, 93, 90, 87, 83, 80, 77, 73, 70, 67, 63, 60, 0]
letters = ['A+', 'A', 'A-', 'B+', 'B', 'B-', 'C+', 'C', 'C-', 'D+', 'D', 'D-', 'F']
for i in range(len(cutoffs)):
if grade >= cutoffs[i]:
self.text = letters[i]
break
``` |
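For reference, the cutoff table above maps to letters as in this standalone sketch (same thresholds, no `Snippet` machinery assumed):

```python
def letter_grade(grade: float) -> str:
    # Same cutoffs as LetterGrade.generate_text above.
    if grade > 100 or grade < 0:
        raise ValueError("Grade must be between 0 and 100")
    cutoffs = [97, 93, 90, 87, 83, 80, 77, 73, 70, 67, 63, 60, 0]
    letters = ['A+', 'A', 'A-', 'B+', 'B', 'B-', 'C+', 'C', 'C-', 'D+', 'D', 'D-', 'F']
    for cutoff, letter in zip(cutoffs, letters):
        if grade >= cutoff:
            return letter

print(letter_grade(91.5))  # A-
print(letter_grade(59.9))  # F
```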
{
"source": "joshourisman/django-pydantic-settings",
"score": 2
} |
#### File: settings_test/settings_test/urls.py
```python
from django.http import JsonResponse
from django.urls import path
def test_view(request):
return JsonResponse({"success": True})
urlpatterns = [
path("", test_view),
]
``` |
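A minimal test sketch for this URLconf, assuming Django's test client is available and `DJANGO_SETTINGS_MODULE` points at the settings_test project:

```python
from django.test import Client

def test_root_view_returns_success():
    # The single route defined above should answer with the JSON payload.
    response = Client().get("/")
    assert response.status_code == 200
    assert response.json() == {"success": True}
```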
{
"source": "joshourisman/pirrip",
"score": 2
} |
#### File: joshourisman/pirrip/pirrip.py
```python
import re
from typing import List, Optional
import requests
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import HTMLResponse
from fastapi.templating import Jinja2Templates
from faunadb import query as q
from faunadb.client import FaunaClient
from faunadb.errors import BadRequest
from faunadb.errors import NotFound as FaunaPackageNotFound
from pydantic import BaseSettings
from pydantic.types import SecretStr
from rich.console import Console
app = FastAPI()
templates = Jinja2Templates(directory="templates")
class PirripSettings(BaseSettings):
PYPI_FALLBACK: Optional[bool] = True
FAUNADB_KEY: SecretStr
class Config:
env_prefix = "PIRRIP_"
settings = PirripSettings()
console = Console()
class PyPiPackageNotFound(Exception):
pass
class PyPiReleaseNotFound(Exception):
pass
class FaunaReleaseNotFound(Exception):
pass
def normalize(name: str) -> str:
return re.sub(r"[-_.]+", "-", name).lower()
async def get_package_by_name(package_name: str) -> dict:
client = FaunaClient(secret=settings.FAUNADB_KEY.get_secret_value())
return client.query(
q.get(q.match(q.index("package_by_name"), normalize(package_name)))
)
async def get_package_names() -> List[str]:
client = FaunaClient(secret=settings.FAUNADB_KEY.get_secret_value())
return client.query(q.paginate(q.match(q.index("package_names"))))["data"]
async def get_fauna_data(package_name: str, release: str = "") -> dict:
console.log(f"Querying FaunaDB for {package_name}.")
try:
fauna_package = await get_package_by_name(package_name)
package = fauna_package["data"]
except FaunaPackageNotFound as e:
if settings.PYPI_FALLBACK is True:
package = await get_pypi_data(package_name)
else:
raise e
else:
if bool(release) is True and release not in package["releases"].keys():
if settings.PYPI_FALLBACK is True:
package = await get_pypi_data(package_name)
if release not in package["releases"].keys():
raise PyPiReleaseNotFound
if bool(release) is True and release not in package["releases"].keys():
raise FaunaReleaseNotFound
return package
async def get_pypi_data(package_name: str) -> dict:
console.log(f"Requesting PyPi data for {package_name}.")
request_url = f"https://pypi.org/pypi/{package_name}/json"
response = requests.get(request_url)
if response.status_code == 404:
raise PyPiPackageNotFound
assert response.status_code == 200
package_data = response.json()
package_data["normalized_name"] = normalize(package_data["info"]["name"])
console.log(f"Logging PyPi data for {package_name} to FaunaDB.")
client = FaunaClient(secret=settings.FAUNADB_KEY.get_secret_value())
try:
client.query(
q.create(
q.collection("packages"),
{"data": package_data},
)
)
except BadRequest:
package = await get_package_by_name(package_name)
ref = package["ref"]
client.query(q.update(ref, {"data": package_data}))
return package_data
@app.get("/pypi/{package_name}/json")
async def package_info(package_name: str):
console.log(f"Attempting to fetch data for {package_name}.")
try:
package = await get_fauna_data(package_name)
except FaunaPackageNotFound:
raise HTTPException(
status_code=404, detail=f"{package_name} not found in Pirrip database."
)
except PyPiPackageNotFound:
raise HTTPException(
status_code=404, detail=f"{package_name} not found in PyPi database."
)
return package
@app.get("/pypi/{package_name}/{release}/json")
async def release_info(package_name: str, release: str):
console.log(f"Attempting to fetch data for {package_name}, release {release}.")
try:
package = await get_fauna_data(package_name, release)
except FaunaPackageNotFound:
raise HTTPException(
status_code=404, detail=f"{package_name} not found in Pirrip database."
)
except PyPiPackageNotFound:
raise HTTPException(
status_code=404, detail=f"{package_name} not found in PyPi database."
)
except FaunaReleaseNotFound:
raise HTTPException(
status_code=404,
detail=f"{package_name} {release} not found in Pirrip database.",
)
except PyPiReleaseNotFound:
raise HTTPException(
status_code=404,
detail=f"{package_name} {release} not found in PyPi database.",
)
return package
@app.get("/simple/", response_class=HTMLResponse)
async def list_packages(request: Request):
return templates.TemplateResponse(
"package_list.html", {"request": request, "packages": await get_package_names()}
)
@app.get("/simple/{package_name}/", response_class=HTMLResponse)
async def package_detail(request: Request, package_name: str):
package = await get_fauna_data(package_name)
return templates.TemplateResponse(
"package_detail.html",
{
"request": request,
"package_name": package_name,
"releases": package["releases"],
},
)
``` |
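Assuming the app above is served locally (for example with `uvicorn pirrip:app`) and `PIRRIP_FAUNADB_KEY` is set in the environment, the JSON endpoints can be exercised with a sketch like this (the package name is just an example):

```python
import requests

BASE = "http://127.0.0.1:8000"

resp = requests.get(f"{BASE}/pypi/requests/json")
if resp.ok:
    info = resp.json()["info"]
    print(info["name"], info["version"])
else:
    # Pirrip returns a 404 with a detail message when neither Fauna nor PyPI knows the package.
    print("lookup failed:", resp.status_code, resp.json().get("detail"))
```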
{
"source": "joshowen/airflow",
"score": 2
} |
#### File: cli/commands/test_webserver_command.py
```python
import os
import subprocess
import tempfile
import unittest
from time import sleep
from unittest import mock
import psutil
import pytest
from airflow import settings
from airflow.cli import cli_parser
from airflow.cli.commands import webserver_command
from airflow.cli.commands.webserver_command import get_num_ready_workers_running
from airflow.models import DagBag
from airflow.utils.cli import setup_locations
from tests.test_utils.config import conf_vars
class TestCLIGetNumReadyWorkersRunning(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dagbag = DagBag(include_examples=True)
cls.parser = cli_parser.get_parser()
def setUp(self):
self.gunicorn_master_proc = mock.Mock(pid=None)
self.children = mock.MagicMock()
self.child = mock.MagicMock()
self.process = mock.MagicMock()
def test_ready_prefix_on_cmdline(self):
self.child.cmdline.return_value = [settings.GUNICORN_WORKER_READY_PREFIX]
self.process.children.return_value = [self.child]
with mock.patch('psutil.Process', return_value=self.process):
self.assertEqual(get_num_ready_workers_running(self.gunicorn_master_proc), 1)
def test_ready_prefix_on_cmdline_no_children(self):
self.process.children.return_value = []
with mock.patch('psutil.Process', return_value=self.process):
self.assertEqual(get_num_ready_workers_running(self.gunicorn_master_proc), 0)
def test_ready_prefix_on_cmdline_zombie(self):
self.child.cmdline.return_value = []
self.process.children.return_value = [self.child]
with mock.patch('psutil.Process', return_value=self.process):
self.assertEqual(get_num_ready_workers_running(self.gunicorn_master_proc), 0)
def test_ready_prefix_on_cmdline_dead_process(self):
self.child.cmdline.side_effect = psutil.NoSuchProcess(11347)
self.process.children.return_value = [self.child]
with mock.patch('psutil.Process', return_value=self.process):
self.assertEqual(get_num_ready_workers_running(self.gunicorn_master_proc), 0)
def test_cli_webserver_debug(self):
env = os.environ.copy()
proc = psutil.Popen(["airflow", "webserver", "--debug"], env=env)
sleep(3) # wait for webserver to start
return_code = proc.poll()
self.assertEqual(
None,
return_code,
"webserver terminated with return code {} in debug mode".format(return_code))
proc.terminate()
proc.wait()
@pytest.mark.quarantined
class TestCliWebServer(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser = cli_parser.get_parser()
def setUp(self) -> None:
self._check_processes()
self._clean_pidfiles()
def _check_processes(self):
try:
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
self.assertEqual(1, subprocess.Popen(["pgrep", "--full", "--count", "airflow webserver"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "--count", "gunicorn"]).wait())
except: # noqa: E722
subprocess.Popen(["ps", "-ax"]).wait()
raise
def tearDown(self) -> None:
self._check_processes()
def _clean_pidfiles(self):
pidfile_webserver = setup_locations("webserver")[0]
pidfile_monitor = setup_locations("webserver-monitor")[0]
if os.path.exists(pidfile_webserver):
os.remove(pidfile_webserver)
if os.path.exists(pidfile_monitor):
os.remove(pidfile_monitor)
def _wait_pidfile(self, pidfile):
while True:
try:
with open(pidfile) as file:
return int(file.read())
except Exception: # pylint: disable=broad-except
sleep(1)
def test_cli_webserver_foreground(self):
# Run webserver in foreground and terminate it.
proc = subprocess.Popen(["airflow", "webserver"])
proc.terminate()
proc.wait()
def test_cli_webserver_foreground_with_pid(self):
# Run webserver in foreground with --pid option
pidfile = tempfile.mkstemp()[1]
proc = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
proc.terminate()
proc.wait()
def test_cli_webserver_background(self):
pidfile_webserver = setup_locations("webserver")[0]
pidfile_monitor = setup_locations("webserver-monitor")[0]
# Run webserver as daemon in background. Note that the wait method is not called.
subprocess.Popen(["airflow", "webserver", "--daemon"])
pid_monitor = self._wait_pidfile(pidfile_monitor)
self._wait_pidfile(pidfile_webserver)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(0, subprocess.Popen(["pgrep", "--full", "--count", "airflow webserver"]).wait())
self.assertEqual(0, subprocess.Popen(["pgrep", "--count", "gunicorn"]).wait())
# Terminate monitor process.
proc = psutil.Process(pid_monitor)
proc.terminate()
proc.wait()
# Patch for causing webserver timeout
@mock.patch("airflow.cli.commands.webserver_command.get_num_workers_running", return_value=0)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
# Shorten the timeout so that this test doesn't take too long
args = self.parser.parse_args(['webserver'])
with conf_vars({('webserver', 'web_server_master_timeout'): '10'}):
with self.assertRaises(SystemExit) as e:
webserver_command.webserver(args)
self.assertEqual(e.exception.code, 1)
```
#### File: tests/operators/test_python.py
```python
import copy
import datetime
import logging
import sys
import unittest
import unittest.mock
from collections import namedtuple
from datetime import date, timedelta
from subprocess import CalledProcessError
from typing import List
import funcsigs
from airflow.exceptions import AirflowException
from airflow.models import DAG, DagRun, TaskInstance as TI
from airflow.models.taskinstance import clear_task_instances
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python import (
BranchPythonOperator, PythonOperator, PythonVirtualenvOperator, ShortCircuitOperator,
)
from airflow.utils import timezone
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.types import DagRunType
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
END_DATE = timezone.datetime(2016, 1, 2)
INTERVAL = timedelta(hours=12)
FROZEN_NOW = timezone.datetime(2016, 1, 2, 12, 1, 1)
TI_CONTEXT_ENV_VARS = ['AIRFLOW_CTX_DAG_ID',
'AIRFLOW_CTX_TASK_ID',
'AIRFLOW_CTX_EXECUTION_DATE',
'AIRFLOW_CTX_DAG_RUN_ID']
class Call:
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def build_recording_function(calls_collection):
"""
We cannot use a Mock instance as a PythonOperator callable function, because some tests fail with a
TypeError: Object of type Mock is not JSON serializable
Instead we use this custom function to record Call objects for further testing
(replacing the Mock.assert_called_with assertion method)
"""
def recording_function(*args, **kwargs):
calls_collection.append(Call(*args, **kwargs))
return recording_function
class TestPythonBase(unittest.TestCase):
"""Base test class for TestPythonOperator and TestPythonSensor classes"""
@classmethod
def setUpClass(cls):
super().setUpClass()
with create_session() as session:
session.query(DagRun).delete()
session.query(TI).delete()
def setUp(self):
super().setUp()
self.dag = DAG(
'test_dag',
default_args={
'owner': 'airflow',
'start_date': DEFAULT_DATE})
self.addCleanup(self.dag.clear)
self.clear_run()
self.addCleanup(self.clear_run)
def tearDown(self):
super().tearDown()
with create_session() as session:
session.query(DagRun).delete()
session.query(TI).delete()
def clear_run(self):
self.run = False
def _assert_calls_equal(self, first, second):
self.assertIsInstance(first, Call)
self.assertIsInstance(second, Call)
self.assertTupleEqual(first.args, second.args)
# eliminate context (conf, dag_run, task_instance, etc.)
test_args = ["an_int", "a_date", "a_templated_string"]
first.kwargs = {
key: value
for (key, value) in first.kwargs.items()
if key in test_args
}
second.kwargs = {
key: value
for (key, value) in second.kwargs.items()
if key in test_args
}
self.assertDictEqual(first.kwargs, second.kwargs)
class TestPythonOperator(TestPythonBase):
def do_run(self):
self.run = True
def is_run(self):
return self.run
def test_python_operator_run(self):
"""Tests that the python callable is invoked on task run."""
task = PythonOperator(
python_callable=self.do_run,
task_id='python_operator',
dag=self.dag)
self.assertFalse(self.is_run())
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
self.assertTrue(self.is_run())
def test_python_operator_python_callable_is_callable(self):
"""Tests that PythonOperator will only instantiate if
the python_callable argument is callable."""
not_callable = {}
with self.assertRaises(AirflowException):
PythonOperator(
python_callable=not_callable,
task_id='python_operator',
dag=self.dag)
not_callable = None
with self.assertRaises(AirflowException):
PythonOperator(
python_callable=not_callable,
task_id='python_operator',
dag=self.dag)
def test_python_callable_arguments_are_templatized(self):
"""Test PythonOperator op_args are templatized"""
recorded_calls = []
# Create a named tuple and ensure it is still preserved
# after the rendering is done
Named = namedtuple('Named', ['var1', 'var2'])
named_tuple = Named('{{ ds }}', 'unchanged')
task = PythonOperator(
task_id='python_operator',
# a Mock instance cannot be used as a callable function, otherwise the test fails with a
# TypeError: Object of type Mock is not JSON serializable
python_callable=build_recording_function(recorded_calls),
op_args=[
4,
date(2019, 1, 1),
"dag {{dag.dag_id}} ran on {{ds}}.",
named_tuple
],
dag=self.dag)
self.dag.create_dagrun(
run_type=DagRunType.MANUAL,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
state=State.RUNNING
)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
ds_templated = DEFAULT_DATE.date().isoformat()
self.assertEqual(1, len(recorded_calls))
self._assert_calls_equal(
recorded_calls[0],
Call(4,
date(2019, 1, 1),
"dag {} ran on {}.".format(self.dag.dag_id, ds_templated),
Named(ds_templated, 'unchanged'))
)
def test_python_callable_keyword_arguments_are_templatized(self):
"""Test PythonOperator op_kwargs are templatized"""
recorded_calls = []
task = PythonOperator(
task_id='python_operator',
# a Mock instance cannot be used as a callable function, otherwise the test fails with a
# TypeError: Object of type Mock is not JSON serializable
python_callable=build_recording_function(recorded_calls),
op_kwargs={
'an_int': 4,
'a_date': date(2019, 1, 1),
'a_templated_string': "dag {{dag.dag_id}} ran on {{ds}}."
},
dag=self.dag)
self.dag.create_dagrun(
run_type=DagRunType.MANUAL,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
state=State.RUNNING
)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
self.assertEqual(1, len(recorded_calls))
self._assert_calls_equal(
recorded_calls[0],
Call(an_int=4,
a_date=date(2019, 1, 1),
a_templated_string="dag {} ran on {}.".format(
self.dag.dag_id, DEFAULT_DATE.date().isoformat()))
)
def test_python_operator_shallow_copy_attr(self):
not_callable = lambda x: x
original_task = PythonOperator(
python_callable=not_callable,
task_id='python_operator',
op_kwargs={'certain_attrs': ''},
dag=self.dag
)
new_task = copy.deepcopy(original_task)
# shallow copy op_kwargs
self.assertEqual(id(original_task.op_kwargs['certain_attrs']),
id(new_task.op_kwargs['certain_attrs']))
# shallow copy python_callable
self.assertEqual(id(original_task.python_callable),
id(new_task.python_callable))
def test_conflicting_kwargs(self):
self.dag.create_dagrun(
run_type=DagRunType.MANUAL,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
state=State.RUNNING,
external_trigger=False,
)
# dag is not allowed since it is a reserved keyword
def func(dag):
# A ValueError should be triggered since we're using dag as a
# reserved keyword
raise RuntimeError("Should not be triggered, dag: {}".format(dag))
python_operator = PythonOperator(
task_id='python_operator',
op_args=[1],
python_callable=func,
dag=self.dag
)
with self.assertRaises(ValueError) as context:
python_operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
self.assertTrue('dag' in context.exception, "'dag' not found in the exception")
def test_context_with_conflicting_op_args(self):
self.dag.create_dagrun(
run_type=DagRunType.MANUAL,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
state=State.RUNNING,
external_trigger=False,
)
def func(custom, dag):
self.assertEqual(1, custom, "custom should be 1")
self.assertIsNotNone(dag, "dag should be set")
python_operator = PythonOperator(
task_id='python_operator',
op_kwargs={'custom': 1},
python_callable=func,
dag=self.dag
)
python_operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
def test_context_with_kwargs(self):
self.dag.create_dagrun(
run_type=DagRunType.MANUAL,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
state=State.RUNNING,
external_trigger=False,
)
def func(**context):
# check if context is being set
self.assertGreater(len(context), 0, "Context has not been injected")
python_operator = PythonOperator(
task_id='python_operator',
op_kwargs={'custom': 1},
python_callable=func,
dag=self.dag
)
python_operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
class TestBranchOperator(unittest.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
with create_session() as session:
session.query(DagRun).delete()
session.query(TI).delete()
def setUp(self):
self.dag = DAG('branch_operator_test',
default_args={
'owner': 'airflow',
'start_date': DEFAULT_DATE},
schedule_interval=INTERVAL)
self.branch_1 = DummyOperator(task_id='branch_1', dag=self.dag)
self.branch_2 = DummyOperator(task_id='branch_2', dag=self.dag)
self.branch_3 = None
def tearDown(self):
super().tearDown()
with create_session() as session:
session.query(DagRun).delete()
session.query(TI).delete()
def test_without_dag_run(self):
"""This checks the defensive against non existent tasks in a dag run"""
branch_op = BranchPythonOperator(task_id='make_choice',
dag=self.dag,
python_callable=lambda: 'branch_1')
self.branch_1.set_upstream(branch_op)
self.branch_2.set_upstream(branch_op)
self.dag.clear()
branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
with create_session() as session:
tis = session.query(TI).filter(
TI.dag_id == self.dag.dag_id,
TI.execution_date == DEFAULT_DATE
)
for ti in tis:
if ti.task_id == 'make_choice':
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == 'branch_1':
# should exist with state None
self.assertEqual(ti.state, State.NONE)
elif ti.task_id == 'branch_2':
self.assertEqual(ti.state, State.SKIPPED)
else:
raise ValueError(f'Invalid task id {ti.task_id} found!')
def test_branch_list_without_dag_run(self):
"""This checks if the BranchPythonOperator supports branching off to a list of tasks."""
branch_op = BranchPythonOperator(task_id='make_choice',
dag=self.dag,
python_callable=lambda: ['branch_1', 'branch_2'])
self.branch_1.set_upstream(branch_op)
self.branch_2.set_upstream(branch_op)
self.branch_3 = DummyOperator(task_id='branch_3', dag=self.dag)
self.branch_3.set_upstream(branch_op)
self.dag.clear()
branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
with create_session() as session:
tis = session.query(TI).filter(
TI.dag_id == self.dag.dag_id,
TI.execution_date == DEFAULT_DATE
)
expected = {
"make_choice": State.SUCCESS,
"branch_1": State.NONE,
"branch_2": State.NONE,
"branch_3": State.SKIPPED,
}
for ti in tis:
if ti.task_id in expected:
self.assertEqual(ti.state, expected[ti.task_id])
else:
raise ValueError(f'Invalid task id {ti.task_id} found!')
def test_with_dag_run(self):
branch_op = BranchPythonOperator(task_id='make_choice',
dag=self.dag,
python_callable=lambda: 'branch_1')
self.branch_1.set_upstream(branch_op)
self.branch_2.set_upstream(branch_op)
self.dag.clear()
dr = self.dag.create_dagrun(
run_type=DagRunType.MANUAL,
start_date=timezone.utcnow(),
execution_date=DEFAULT_DATE,
state=State.RUNNING
)
branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id == 'make_choice':
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == 'branch_1':
self.assertEqual(ti.state, State.NONE)
elif ti.task_id == 'branch_2':
self.assertEqual(ti.state, State.SKIPPED)
else:
raise ValueError(f'Invalid task id {ti.task_id} found!')
def test_with_skip_in_branch_downstream_dependencies(self):
branch_op = BranchPythonOperator(task_id='make_choice',
dag=self.dag,
python_callable=lambda: 'branch_1')
branch_op >> self.branch_1 >> self.branch_2
branch_op >> self.branch_2
self.dag.clear()
dr = self.dag.create_dagrun(
run_type=DagRunType.MANUAL,
start_date=timezone.utcnow(),
execution_date=DEFAULT_DATE,
state=State.RUNNING
)
branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id == 'make_choice':
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == 'branch_1':
self.assertEqual(ti.state, State.NONE)
elif ti.task_id == 'branch_2':
self.assertEqual(ti.state, State.NONE)
else:
raise ValueError(f'Invalid task id {ti.task_id} found!')
def test_with_skip_in_branch_downstream_dependencies2(self):
branch_op = BranchPythonOperator(task_id='make_choice',
dag=self.dag,
python_callable=lambda: 'branch_2')
branch_op >> self.branch_1 >> self.branch_2
branch_op >> self.branch_2
self.dag.clear()
dr = self.dag.create_dagrun(
run_type=DagRunType.MANUAL,
start_date=timezone.utcnow(),
execution_date=DEFAULT_DATE,
state=State.RUNNING
)
branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id == 'make_choice':
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == 'branch_1':
self.assertEqual(ti.state, State.SKIPPED)
elif ti.task_id == 'branch_2':
self.assertEqual(ti.state, State.NONE)
else:
raise ValueError(f'Invalid task id {ti.task_id} found!')
def test_xcom_push(self):
branch_op = BranchPythonOperator(task_id='make_choice',
dag=self.dag,
python_callable=lambda: 'branch_1')
self.branch_1.set_upstream(branch_op)
self.branch_2.set_upstream(branch_op)
self.dag.clear()
dr = self.dag.create_dagrun(
run_type=DagRunType.MANUAL,
start_date=timezone.utcnow(),
execution_date=DEFAULT_DATE,
state=State.RUNNING
)
branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id == 'make_choice':
self.assertEqual(
ti.xcom_pull(task_ids='make_choice'), 'branch_1')
def test_clear_skipped_downstream_task(self):
"""
After a downstream task is skipped by BranchPythonOperator, clearing the skipped task
should not cause it to be executed.
"""
branch_op = BranchPythonOperator(task_id='make_choice',
dag=self.dag,
python_callable=lambda: 'branch_1')
branches = [self.branch_1, self.branch_2]
branch_op >> branches
self.dag.clear()
dr = self.dag.create_dagrun(
run_type=DagRunType.MANUAL,
start_date=timezone.utcnow(),
execution_date=DEFAULT_DATE,
state=State.RUNNING
)
branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
for task in branches:
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id == 'make_choice':
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == 'branch_1':
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == 'branch_2':
self.assertEqual(ti.state, State.SKIPPED)
else:
raise ValueError(f'Invalid task id {ti.task_id} found!')
children_tis = [ti for ti in tis if ti.task_id in branch_op.get_direct_relative_ids()]
# Clear the children tasks.
with create_session() as session:
clear_task_instances(children_tis, session=session, dag=self.dag)
# Run the cleared tasks again.
for task in branches:
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
# Check if the states are correct after children tasks are cleared.
for ti in dr.get_task_instances():
if ti.task_id == 'make_choice':
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == 'branch_1':
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == 'branch_2':
self.assertEqual(ti.state, State.SKIPPED)
else:
raise ValueError(f'Invalid task id {ti.task_id} found!')
class TestShortCircuitOperator(unittest.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
with create_session() as session:
session.query(DagRun).delete()
session.query(TI).delete()
def tearDown(self):
super().tearDown()
with create_session() as session:
session.query(DagRun).delete()
session.query(TI).delete()
def test_without_dag_run(self):
"""This checks the defensive against non existent tasks in a dag run"""
value = False
dag = DAG('shortcircuit_operator_test_without_dag_run',
default_args={
'owner': 'airflow',
'start_date': DEFAULT_DATE
},
schedule_interval=INTERVAL)
short_op = ShortCircuitOperator(task_id='make_choice',
dag=dag,
python_callable=lambda: value)
branch_1 = DummyOperator(task_id='branch_1', dag=dag)
branch_1.set_upstream(short_op)
branch_2 = DummyOperator(task_id='branch_2', dag=dag)
branch_2.set_upstream(branch_1)
upstream = DummyOperator(task_id='upstream', dag=dag)
upstream.set_downstream(short_op)
dag.clear()
short_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
with create_session() as session:
tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.execution_date == DEFAULT_DATE
)
for ti in tis:
if ti.task_id == 'make_choice':
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == 'upstream':
# should not exist
raise ValueError(f'Invalid task id {ti.task_id} found!')
elif ti.task_id == 'branch_1' or ti.task_id == 'branch_2':
self.assertEqual(ti.state, State.SKIPPED)
else:
raise ValueError(f'Invalid task id {ti.task_id} found!')
value = True
dag.clear()
short_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
for ti in tis:
if ti.task_id == 'make_choice':
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == 'upstream':
# should not exist
raise ValueError(f'Invalid task id {ti.task_id} found!')
elif ti.task_id == 'branch_1' or ti.task_id == 'branch_2':
self.assertEqual(ti.state, State.NONE)
else:
raise ValueError(f'Invalid task id {ti.task_id} found!')
def test_with_dag_run(self):
value = False
dag = DAG('shortcircuit_operator_test_with_dag_run',
default_args={
'owner': 'airflow',
'start_date': DEFAULT_DATE
},
schedule_interval=INTERVAL)
short_op = ShortCircuitOperator(task_id='make_choice',
dag=dag,
python_callable=lambda: value)
branch_1 = DummyOperator(task_id='branch_1', dag=dag)
branch_1.set_upstream(short_op)
branch_2 = DummyOperator(task_id='branch_2', dag=dag)
branch_2.set_upstream(branch_1)
upstream = DummyOperator(task_id='upstream', dag=dag)
upstream.set_downstream(short_op)
dag.clear()
logging.error("Tasks %s", dag.tasks)
dr = dag.create_dagrun(
run_type=DagRunType.MANUAL,
start_date=timezone.utcnow(),
execution_date=DEFAULT_DATE,
state=State.RUNNING
)
upstream.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
short_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
tis = dr.get_task_instances()
self.assertEqual(len(tis), 4)
for ti in tis:
if ti.task_id == 'make_choice':
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == 'upstream':
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == 'branch_1' or ti.task_id == 'branch_2':
self.assertEqual(ti.state, State.SKIPPED)
else:
raise ValueError(f'Invalid task id {ti.task_id} found!')
value = True
dag.clear()
dr.verify_integrity()
upstream.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
short_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
tis = dr.get_task_instances()
self.assertEqual(len(tis), 4)
for ti in tis:
if ti.task_id == 'make_choice':
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == 'upstream':
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == 'branch_1' or ti.task_id == 'branch_2':
self.assertEqual(ti.state, State.NONE)
else:
raise ValueError(f'Invalid task id {ti.task_id} found!')
def test_clear_skipped_downstream_task(self):
"""
After a downstream task is skipped by ShortCircuitOperator, clearing the skipped task
should not cause it to be executed.
"""
dag = DAG('shortcircuit_clear_skipped_downstream_task',
default_args={
'owner': 'airflow',
'start_date': DEFAULT_DATE
},
schedule_interval=INTERVAL)
short_op = ShortCircuitOperator(task_id='make_choice',
dag=dag,
python_callable=lambda: False)
downstream = DummyOperator(task_id='downstream', dag=dag)
short_op >> downstream
dag.clear()
dr = dag.create_dagrun(
run_type=DagRunType.MANUAL,
start_date=timezone.utcnow(),
execution_date=DEFAULT_DATE,
state=State.RUNNING
)
short_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
downstream.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id == 'make_choice':
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == 'downstream':
self.assertEqual(ti.state, State.SKIPPED)
else:
raise ValueError(f'Invalid task id {ti.task_id} found!')
# Clear downstream
with create_session() as session:
clear_task_instances([t for t in tis if t.task_id == "downstream"],
session=session,
dag=dag)
# Run downstream again
downstream.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
# Check if the states are correct.
for ti in dr.get_task_instances():
if ti.task_id == 'make_choice':
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == 'downstream':
self.assertEqual(ti.state, State.SKIPPED)
else:
raise ValueError(f'Invalid task id {ti.task_id} found!')
virtualenv_string_args: List[str] = []
class TestPythonVirtualenvOperator(unittest.TestCase):
def setUp(self):
super().setUp()
self.dag = DAG(
'test_dag',
default_args={
'owner': 'airflow',
'start_date': DEFAULT_DATE},
schedule_interval=INTERVAL)
self.addCleanup(self.dag.clear)
def _run_as_operator(self, fn, python_version=sys.version_info[0], **kwargs):
task = PythonVirtualenvOperator(
python_callable=fn,
python_version=python_version,
task_id='task',
dag=self.dag,
**kwargs)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
def test_dill_warning(self):
def f():
pass
with self.assertRaises(AirflowException):
PythonVirtualenvOperator(
python_callable=f,
task_id='task',
dag=self.dag,
use_dill=True,
system_site_packages=False)
def test_no_requirements(self):
"""Tests that the python callable is invoked on task run."""
def f():
pass
self._run_as_operator(f)
def test_no_system_site_packages(self):
def f():
try:
import funcsigs # noqa: F401 # pylint: disable=redefined-outer-name,reimported,unused-import
except ImportError:
return True
raise Exception
self._run_as_operator(f, system_site_packages=False, requirements=['dill'])
def test_system_site_packages(self):
def f():
import funcsigs # noqa: F401 # pylint: disable=redefined-outer-name,reimported,unused-import
self._run_as_operator(f, requirements=['funcsigs'], system_site_packages=True)
def test_with_requirements_pinned(self):
self.assertNotEqual(
'0.4', funcsigs.__version__, 'Please update this string if this fails')
def f():
import funcsigs # noqa: F401 # pylint: disable=redefined-outer-name,reimported
if funcsigs.__version__ != '0.4':
raise Exception
self._run_as_operator(f, requirements=['funcsigs==0.4'])
def test_unpinned_requirements(self):
def f():
import funcsigs # noqa: F401 # pylint: disable=redefined-outer-name,reimported,unused-import
self._run_as_operator(
f, requirements=['funcsigs', 'dill'], system_site_packages=False)
def test_range_requirements(self):
def f():
import funcsigs # noqa: F401 # pylint: disable=redefined-outer-name,reimported,unused-import
self._run_as_operator(
f, requirements=['funcsigs>1.0', 'dill'], system_site_packages=False)
def test_fail(self):
def f():
raise Exception
with self.assertRaises(CalledProcessError):
self._run_as_operator(f)
def test_python_2(self):
def f():
{}.iteritems() # pylint: disable=no-member
self._run_as_operator(f, python_version=2, requirements=['dill'])
def test_python_2_7(self):
def f():
{}.iteritems() # pylint: disable=no-member
return True
self._run_as_operator(f, python_version='2.7', requirements=['dill'])
def test_python_3(self):
def f():
import sys # pylint: disable=reimported,unused-import,redefined-outer-name
print(sys.version)
try:
{}.iteritems() # pylint: disable=no-member
except AttributeError:
return
raise Exception
self._run_as_operator(f, python_version=3, use_dill=False, requirements=['dill'])
@staticmethod
def _invert_python_major_version():
if sys.version_info[0] == 2:
return 3
else:
return 2
def test_wrong_python_op_args(self):
if sys.version_info[0] == 2:
version = 3
else:
version = 2
def f():
pass
with self.assertRaises(AirflowException):
self._run_as_operator(f, python_version=version, op_args=[1])
def test_without_dill(self):
def f(a):
return a
self._run_as_operator(f, system_site_packages=False, use_dill=False, op_args=[4])
def test_string_args(self):
def f():
global virtualenv_string_args # pylint: disable=global-statement
print(virtualenv_string_args)
if virtualenv_string_args[0] != virtualenv_string_args[2]:
raise Exception
self._run_as_operator(
f, python_version=self._invert_python_major_version(), string_args=[1, 2, 1])
def test_with_args(self):
def f(a, b, c=False, d=False):
if a == 0 and b == 1 and c and not d:
return True
else:
raise Exception
self._run_as_operator(f, op_args=[0, 1], op_kwargs={'c': True})
def test_return_none(self):
def f():
return None
self._run_as_operator(f)
def test_lambda(self):
with self.assertRaises(AirflowException):
PythonVirtualenvOperator(
python_callable=lambda x: 4,
task_id='task',
dag=self.dag)
def test_nonimported_as_arg(self):
def f(_):
return None
self._run_as_operator(f, op_args=[datetime.datetime.utcnow()])
def test_context(self):
def f(templates_dict):
return templates_dict['ds']
self._run_as_operator(f, templates_dict={'ds': '{{ ds }}'})
``` |
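For context, the operator exercised by these tests is typically used in a DAG like the following minimal sketch (a standard Airflow install is assumed; the dag id, task id, and callable are illustrative):

```python
from datetime import datetime

from airflow import DAG
from airflow.operators.python import PythonOperator

def greet(name, **context):
    # op_kwargs supplies `name`; the execution date arrives via the templated context.
    print(f"hello {name} on {context['ds']}")

with DAG("example_python_operator",
         start_date=datetime(2016, 1, 1),
         schedule_interval=None) as dag:
    PythonOperator(task_id="greet",
                   python_callable=greet,
                   op_kwargs={"name": "airflow"})
```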
{
"source": "JoshOY/DataStructureCourseDesign",
"score": 4
} |
#### File: PROB10/my_sort/insertionSort.py
```python
import copy
def insertion_sort(sorting_list):
step = 0
ls = copy.deepcopy(sorting_list)
for i in range(1, len(ls)):
tmp = ls[i]
j = i
while j > 0 and ls[j-1] > tmp:
ls[j] = ls[j-1]
j -= 1
step += 1
ls[j] = tmp
return (ls, step)
if __name__ == "__main__":
sList=[ 13, 14, 94, 33, 82, 25, 59, 94, 65, 23, 45, 27, 73, 25, 39, 10 ]
print(insertion_sort(sList))
```
#### File: PROB10/my_sort/mergeSort.py
```python
import copy
merge_step = 0
def merge_sort(sorting_list):
global merge_step
if(len(sorting_list) <= 1):
return sorting_list
def merge(left, right):
global merge_step
rtn = []
while len(left) != 0 and len(right) != 0:
rtn.append(left.pop(0) if left[0] <= right[0] else right.pop(0))
merge_step += 1
while len(left) != 0:
merge_step += 1
rtn.append(left.pop(0))
while len(right) != 0:
merge_step += 1
rtn.append(right.pop(0))
return rtn
ls = copy.deepcopy(sorting_list)
middle_index = int(len(ls) / 2)
left = merge_sort(ls[0:middle_index])
right = merge_sort(ls[middle_index:])
return merge(left, right)
if __name__ == "__main__":
sList=[13, 14, 94, 33, 82, 25, 59, 94, 65, 23, 45, 27, 73, 25, 39, 10 ]
print(merge_sort(sList), merge_step)
```
#### File: PROB10/my_sort/quickSort.py
```python
import copy
qsort_step = 0
def quick_sort(sorting_list):
global qsort_step
ls = copy.deepcopy(sorting_list)
if len(sorting_list) == 0:
return []
elif len(sorting_list) == 1:
return sorting_list
else:
pivot = ls[0]
qsort_step += 1
left = quick_sort([x for x in ls[1:] if x < pivot])
right = quick_sort([x for x in ls[1:] if x >= pivot])
return left + [pivot] + right
if __name__ == "__main__":
sList = [13, 14, 94, 33, 82, 25, 59, 94, 65, 23, 45, 27, 73, 25, 39, 10]
print([quick_sort(sList), qsort_step])
``` |
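A small driver sketch that runs all three sorts on the same input and compares their step counters, assuming `my_sort` is importable as a package:

```python
from my_sort import insertionSort, mergeSort, quickSort

data = [13, 14, 94, 33, 82, 25, 59, 94, 65, 23, 45, 27, 73, 25, 39, 10]

sorted_ins, ins_steps = insertionSort.insertion_sort(data)
sorted_merge = mergeSort.merge_sort(data)
sorted_quick = quickSort.quick_sort(data)

# The merge/quick step counters live at module level, so read them after the calls.
print("insertion:", ins_steps)
print("merge:    ", mergeSort.merge_step)
print("quick:    ", quickSort.qsort_step)
assert sorted_ins == sorted_merge == sorted_quick
```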
{
"source": "JoshOY/formlang-ply",
"score": 3
} |
#### File: app/controllers/ast_generate.py
```python
from aiohttp import web
from ..routes import app_routes
from ..utils.formlang.ast_parser import FormLangASTParser
def transform_structure(ast):
ret = {}
if type(ast) == tuple:
ret["text"] = {
"name": ast[0],
}
if len(ast) > 1:
ret["children"] = []
for n in ast[1:]:
ret["children"].append(transform_structure(n))
else:
ret["text"] = {
"title": "value",
"desc": str(ast),
}
return ret
@app_routes.post('/api/ast_generate')
async def post_ast_generate(request):
if request.body_exists:
body_data = await request.post()
if 'rawCode' not in body_data:
return web.HTTPBadRequest()
raw_code = body_data['rawCode']
print('------------------')
print(raw_code)
print('------------------')
else:
return web.HTTPBadRequest()
ast_parser = FormLangASTParser()
result = ast_parser.input(raw_code)
ast_parser.restart()
return web.json_response({
'ok': True,
'result': transform_structure(result),
})
```
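To make the tree shape concrete, here is what `transform_structure` returns for a small tuple-encoded AST; the tuple layout (node name first, then children) is a hypothetical example chosen only for illustration.

```python
# Hypothetical AST: node name first, then children (nested tuples or leaf values).
ast = ('assign', 'x', ('add', 1, 2))
# transform_structure(ast) produces:
# {'text': {'name': 'assign'},
#  'children': [
#      {'text': {'title': 'value', 'desc': 'x'}},
#      {'text': {'name': 'add'},
#       'children': [{'text': {'title': 'value', 'desc': '1'}},
#                    {'text': {'title': 'value', 'desc': '2'}}]}]}
```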
#### File: app/controllers/ast_view.py
```python
from aiohttp import web
import aiohttp_jinja2
import json
from ..routes import app_routes
@app_routes.view('/')
class ASTView(web.View):
@aiohttp_jinja2.template('index.jinja2')
async def get(self):
return { 'src_code_init': None }
@aiohttp_jinja2.template('index.jinja2')
async def post(self):
request_body = await self.request.post()
src_code_init = request_body['srccode']
return {
'src_code_init': json.dumps(src_code_init) if src_code_init else None,
}
```
#### File: formlang-ply/app/server.py
```python
from aiohttp import web
from .routes import app_routes
from .controllers import *
from .utils.path_resolver import PathResolver
import aiohttp_jinja2
import jinja2
def run():
app = web.Application()
# setup template rendering engine
template_path = PathResolver.resolve_by_root('app/templates')
aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader(str(template_path)))
# set up static file directory
static_path = PathResolver.resolve_by_root('app/static')
app.router.add_static('/public', static_path)
app.add_routes(app_routes)
web.run_app(app)
``` |
{
"source": "joshp112358/Cirq",
"score": 2
} |
#### File: contrib/qcircuit/qcircuit_test.py
```python
import cirq
import cirq.contrib.qcircuit as ccq
import cirq.testing as ct
def assert_has_qcircuit_diagram(
actual: cirq.Circuit,
desired: str,
**kwargs) -> None:
"""Determines if a given circuit has the desired qcircuit diagram.
Args:
actual: The circuit that was actually computed by some process.
desired: The desired qcircuit diagram as a string. Newlines at the
beginning and whitespace at the end are ignored.
**kwargs: Keyword arguments to be passed to
circuit_to_latex_using_qcircuit.
"""
actual_diagram = ccq.circuit_to_latex_using_qcircuit(actual, **kwargs
).lstrip('\n').rstrip()
desired_diagram = desired.lstrip("\n").rstrip()
assert actual_diagram == desired_diagram, (
"Circuit's qcircuit diagram differs from the desired diagram.\n"
'\n'
'Diagram of actual circuit:\n'
'{}\n'
'\n'
'Desired qcircuit diagram:\n'
'{}\n'
'\n'
'Highlighted differences:\n'
'{}\n'.format(actual_diagram, desired_diagram,
ct.highlight_text_differences(actual_diagram,
desired_diagram))
)
def test_fallback_diagram():
class MagicGate(cirq.ThreeQubitGate):
def __str__(self):
return 'MagicGate'
class MagicOp(cirq.Operation):
def __init__(self, *qubits):
self._qubits = qubits
def with_qubits(self, *new_qubits):
return MagicOp(*new_qubits)
@property
def qubits(self):
return self._qubits
def __str__(self):
return 'MagicOperate'
circuit = cirq.Circuit(
MagicOp(cirq.NamedQubit('b')),
MagicGate().on(cirq.NamedQubit('b'), cirq.NamedQubit('a'),
cirq.NamedQubit('c')))
expected_diagram = r"""
\Qcircuit @R=1em @C=0.75em {
\\
&\lstick{\text{a}}& \qw& \qw&\gate{\text{\#2}} \qw &\qw\\
&\lstick{\text{b}}& \qw&\gate{\text{MagicOperate}} \qw&\gate{\text{MagicGate}} \qw\qwx&\qw\\
&\lstick{\text{c}}& \qw& \qw&\gate{\text{\#3}} \qw\qwx&\qw\\
\\
}""".strip()
assert_has_qcircuit_diagram(circuit, expected_diagram)
def test_teleportation_diagram():
ali = cirq.NamedQubit('alice')
car = cirq.NamedQubit('carrier')
bob = cirq.NamedQubit('bob')
circuit = cirq.Circuit(
cirq.H(car), cirq.CNOT(car, bob),
cirq.X(ali)**0.5, cirq.CNOT(ali, car), cirq.H(ali),
[cirq.measure(ali), cirq.measure(car)], cirq.CNOT(car, bob),
cirq.CZ(ali, bob))
expected_diagram = r"""
\Qcircuit @R=1em @C=0.75em {
\\
&\lstick{\text{alice}}& \qw&\gate{\text{X}^{0.5}} \qw& \qw &\control \qw &\gate{\text{H}} \qw&\meter \qw &\control \qw &\qw\\
&\lstick{\text{carrier}}& \qw&\gate{\text{H}} \qw&\control \qw &\targ \qw\qwx&\meter \qw&\control \qw & \qw\qwx&\qw\\
&\lstick{\text{bob}}& \qw& \qw&\targ \qw\qwx& \qw & \qw&\targ \qw\qwx&\control \qw\qwx&\qw\\
\\
}""".strip()
assert_has_qcircuit_diagram(circuit, expected_diagram,
qubit_order=cirq.QubitOrder.explicit([ali, car, bob]))
def test_other_diagram():
a, b, c = cirq.LineQubit.range(3)
circuit = cirq.Circuit(cirq.X(a), cirq.Y(b), cirq.Z(c))
expected_diagram = r"""
\Qcircuit @R=1em @C=0.75em {
\\
&\lstick{\text{0}}& \qw&\targ \qw&\qw\\
&\lstick{\text{1}}& \qw&\gate{\text{Y}} \qw&\qw\\
&\lstick{\text{2}}& \qw&\gate{\text{Z}} \qw&\qw\\
\\
}""".strip()
assert_has_qcircuit_diagram(circuit, expected_diagram)
def test_qcircuit_qubit_namer():
from cirq.contrib.qcircuit import qcircuit_diagram
assert(qcircuit_diagram.qcircuit_qubit_namer(cirq.NamedQubit('q'))
== r'\lstick{\text{q}}&')
assert(qcircuit_diagram.qcircuit_qubit_namer(cirq.NamedQubit('q_1'))
== r'\lstick{\text{q\_1}}&')
assert(qcircuit_diagram.qcircuit_qubit_namer(cirq.NamedQubit('q^1'))
== r'\lstick{\text{q\textasciicircum{}1}}&')
assert(qcircuit_diagram.qcircuit_qubit_namer(cirq.NamedQubit('q_{1}'))
== r'\lstick{\text{q\_\{1\}}}&')
```
#### File: Cirq/cirq/_doc.py
```python
from typing import Any, Dict, NamedTuple, Optional
DocProperties = NamedTuple(
'DocProperties',
[
('doc_string', Optional[str]),
],
)
RECORDED_CONST_DOCS: Dict[int, DocProperties] = {}
def document(value: Any, doc_string: Optional[str] = None):
"""Stores documentation details about the given value.
This method is used to associate a docstring with global constants. It is
also used to indicate that a private method should be included in the public
documentation (e.g. when documenting protocols or arithmetic operations).
The given documentation information is filed under `id(value)` in
`cirq._doc.RECORDED_CONST_DOCS`.
Args:
value: The value to associate with documentation information.
doc_string: The doc string to associate with the value. Defaults to the
value's __doc__ attribute.
Returns:
The given value.
"""
docs = DocProperties(doc_string=doc_string)
RECORDED_CONST_DOCS[id(value)] = docs
return value
```
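A short sketch of how `document` is meant to be used on a module-level constant, using only the names defined above (assumed to be in scope):

```python
ANSWER = document(42, "The canonical example constant.")

# The doc properties are filed under the value's id.
assert RECORDED_CONST_DOCS[id(ANSWER)].doc_string == "The canonical example constant."
```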
#### File: cirq/experiments/fidelity_estimation_test.py
```python
import itertools
from typing import Sequence
import numpy as np
import pytest
import cirq
def sample_noisy_bitstrings(circuit: cirq.Circuit,
qubit_order: Sequence[cirq.Qid],
depolarization: float,
repetitions: int) -> np.ndarray:
assert 0 <= depolarization <= 1
dim = np.product(circuit.qid_shape())
n_incoherent = int(depolarization * repetitions)
n_coherent = repetitions - n_incoherent
incoherent_samples = np.random.randint(dim, size=n_incoherent)
circuit_with_measurements = cirq.Circuit(
circuit, cirq.measure(*qubit_order, key='m'))
r = cirq.sample(circuit_with_measurements, repetitions=n_coherent)
coherent_samples = r.data['m'].to_numpy()
return np.concatenate((coherent_samples, incoherent_samples))
def make_random_quantum_circuit(qubits: Sequence[cirq.Qid],
depth: int) -> cirq.Circuit:
SQ_GATES = [cirq.X**0.5, cirq.Y**0.5, cirq.T]
circuit = cirq.Circuit()
cz_start = 0
for q in qubits:
circuit.append(cirq.H(q))
for _ in range(depth):
for q in qubits:
random_gate = SQ_GATES[np.random.randint(len(SQ_GATES))]
circuit.append(random_gate(q))
for q0, q1 in zip(itertools.islice(qubits, cz_start, None, 2),
itertools.islice(qubits, cz_start + 1, None, 2)):
circuit.append(cirq.CNOT(q0, q1))
cz_start = 1 - cz_start
for q in qubits:
circuit.append(cirq.H(q))
return circuit
@pytest.mark.parametrize('depolarization, estimator',
itertools.product(
(0.0, 0.2, 0.7, 1.0),
(cirq.hog_score_xeb_fidelity_from_probabilities,
cirq.linear_xeb_fidelity_from_probabilities,
cirq.log_xeb_fidelity_from_probabilities)))
def test_xeb_fidelity(depolarization, estimator):
prng_state = np.random.get_state()
np.random.seed(0)
fs = []
for _ in range(10):
qubits = cirq.LineQubit.range(5)
circuit = make_random_quantum_circuit(qubits, depth=12)
bitstrings = sample_noisy_bitstrings(circuit,
qubits,
depolarization,
repetitions=5000)
f = cirq.xeb_fidelity(circuit, bitstrings, qubits, estimator=estimator)
amplitudes = cirq.final_wavefunction(circuit)
f2 = cirq.xeb_fidelity(circuit,
bitstrings,
qubits,
amplitudes=amplitudes,
estimator=estimator)
assert np.abs(f - f2) < 1e-6
fs.append(f)
estimated_fidelity = np.mean(fs)
expected_fidelity = 1 - depolarization
assert np.isclose(estimated_fidelity, expected_fidelity, atol=0.04)
np.random.set_state(prng_state)
def test_linear_and_log_xeb_fidelity():
prng_state = np.random.get_state()
np.random.seed(0)
depolarization = 0.5
fs_log = []
fs_lin = []
for _ in range(10):
qubits = cirq.LineQubit.range(5)
circuit = make_random_quantum_circuit(qubits, depth=12)
bitstrings = sample_noisy_bitstrings(circuit,
qubits,
depolarization=depolarization,
repetitions=5000)
f_log = cirq.log_xeb_fidelity(circuit, bitstrings, qubits)
f_lin = cirq.linear_xeb_fidelity(circuit, bitstrings, qubits)
fs_log.append(f_log)
fs_lin.append(f_lin)
assert np.isclose(np.mean(fs_log), 1 - depolarization, atol=0.01)
assert np.isclose(np.mean(fs_lin), 1 - depolarization, atol=0.09)
np.random.set_state(prng_state)
def test_xeb_fidelity_invalid_qubits():
q0, q1, q2 = cirq.LineQubit.range(3)
circuit = cirq.Circuit(cirq.H(q0), cirq.CNOT(q0, q1))
bitstrings = sample_noisy_bitstrings(circuit, (q0, q1, q2), 0.9, 10)
with pytest.raises(ValueError):
cirq.xeb_fidelity(circuit, bitstrings, (q0, q2))
def test_xeb_fidelity_invalid_bitstrings():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.H(q0), cirq.CNOT(q0, q1))
bitstrings = [0, 1, 2, 3, 4]
with pytest.raises(ValueError):
cirq.xeb_fidelity(circuit, bitstrings, (q0, q1))
def test_xeb_fidelity_tuple_input():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.H(q0), cirq.CNOT(q0, q1))
bitstrings = [0, 1, 2]
f1 = cirq.xeb_fidelity(circuit, bitstrings, (q0, q1))
f2 = cirq.xeb_fidelity(circuit, tuple(bitstrings), (q0, q1))
assert f1 == f2
```
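Outside the test harness, the same estimator can be called directly; a minimal sketch reusing the helpers defined above (depth, noise level, and shot count are arbitrary):

```python
import cirq

qubits = cirq.LineQubit.range(3)
circuit = make_random_quantum_circuit(qubits, depth=6)
bitstrings = sample_noisy_bitstrings(circuit, qubits, depolarization=0.3, repetitions=2000)

# With 30% depolarization the linear XEB estimate should land near 0.7.
print(cirq.linear_xeb_fidelity(circuit, bitstrings, qubits))
```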
#### File: cirq/experiments/random_quantum_circuit_generation_test.py
```python
from typing import (Callable, Dict, Iterable, List, Optional, Sequence, Set,
Tuple, cast)
import numpy as np
import pytest
import cirq
from cirq.experiments import (
GridInteractionLayer,
random_rotations_between_grid_interaction_layers_circuit)
SINGLE_QUBIT_LAYER = Dict[cirq.GridQubit, Optional[cirq.Gate]]
def _syc_with_adjacent_z_rotations(a: cirq.GridQubit, b: cirq.GridQubit,
prng: np.random.RandomState):
z_exponents = [prng.uniform(0, 1) for _ in range(4)]
yield cirq.Z(a)**z_exponents[0]
yield cirq.Z(b)**z_exponents[1]
yield cirq.google.SYC(a, b)
yield cirq.Z(a)**z_exponents[2]
yield cirq.Z(b)**z_exponents[3]
@pytest.mark.parametrize(
'qubits, depth, two_qubit_op_factory, pattern, '
'single_qubit_gates, add_final_single_qubit_layer, '
'seed, expected_circuit_length, single_qubit_layers_slice, '
'two_qubit_layers_slice', (
(cirq.GridQubit.rect(4, 3), 20, lambda a, b, _: cirq.google.SYC(a, b),
cirq.experiments.GRID_STAGGERED_PATTERN,
(cirq.X**0.5, cirq.Y**0.5, cirq.Z**
0.5), True, 1234, 41, slice(None, None, 2), slice(1, None, 2)),
(cirq.GridQubit.rect(4, 5), 21, lambda a, b, _: cirq.google.SYC(a, b),
cirq.experiments.GRID_ALIGNED_PATTERN,
(cirq.X**0.5, cirq.Y**0.5, cirq.Z**
0.5), True, 1234, 43, slice(None, None, 2), slice(1, None, 2)),
(cirq.GridQubit.rect(5, 4), 22, _syc_with_adjacent_z_rotations,
cirq.experiments.GRID_STAGGERED_PATTERN,
(cirq.X**0.5, cirq.Y**0.5, cirq.Z**
0.5), True, 1234, 89, slice(None, None, 4), slice(2, None, 4)),
(cirq.GridQubit.rect(5, 5), 23, lambda a, b, _: cirq.google.SYC(a, b),
cirq.experiments.GRID_ALIGNED_PATTERN,
(cirq.X**0.5, cirq.Y**0.5, cirq.Z**
0.5), False, 1234, 46, slice(None, None, 2), slice(1, None, 2)),
(cirq.GridQubit.rect(5, 5), 24, lambda a, b, _: cirq.google.SYC(a, b),
cirq.experiments.GRID_ALIGNED_PATTERN, (cirq.X**0.5, cirq.X**0.5),
True, 1234, 49, slice(None, None, 2), slice(1, None, 2)),
))
def test_random_rotations_between_grid_interaction_layers(
qubits: Iterable[cirq.GridQubit], depth: int,
two_qubit_op_factory: Callable[
[cirq.GridQubit, cirq.GridQubit, np.random.RandomState], cirq.
OP_TREE], pattern: Sequence[GridInteractionLayer],
single_qubit_gates: Sequence[cirq.Gate],
add_final_single_qubit_layer: bool, seed: cirq.value.RANDOM_STATE_LIKE,
expected_circuit_length: int, single_qubit_layers_slice: slice,
two_qubit_layers_slice: slice):
qubits = set(qubits)
circuit = random_rotations_between_grid_interaction_layers_circuit(
qubits,
depth,
two_qubit_op_factory=two_qubit_op_factory,
pattern=pattern,
single_qubit_gates=single_qubit_gates,
add_final_single_qubit_layer=add_final_single_qubit_layer,
seed=seed)
assert len(circuit) == expected_circuit_length
_validate_single_qubit_layers(
qubits,
cast(Sequence[cirq.Moment], circuit[single_qubit_layers_slice]),
non_repeating_layers=len(set(single_qubit_gates)) > 1)
_validate_two_qubit_layers(
qubits, cast(Sequence[cirq.Moment], circuit[two_qubit_layers_slice]),
pattern)
def test_grid_interaction_layer_repr():
layer = GridInteractionLayer(col_offset=0, vertical=True, stagger=False)
assert repr(layer) == ('cirq.experiments.GridInteractionLayer('
'col_offset=0, vertical=True, stagger=False)')
def _validate_single_qubit_layers(qubits: Set[cirq.GridQubit],
moments: Sequence[cirq.Moment],
non_repeating_layers: bool = True) -> None:
previous_single_qubit_gates = {q: None for q in qubits
} # type: SINGLE_QUBIT_LAYER
for moment in moments:
# All qubits are acted upon
assert moment.qubits == qubits
for op in moment:
# Operation is single-qubit
assert cirq.num_qubits(op) == 1
if non_repeating_layers:
# Gate differs from previous single-qubit gate on this qubit
q = cast(cirq.GridQubit, op.qubits[0])
assert op.gate != previous_single_qubit_gates[q]
previous_single_qubit_gates[q] = op.gate
def _validate_two_qubit_layers(
qubits: Set[cirq.GridQubit], moments: Sequence[cirq.Moment],
pattern: Sequence[cirq.experiments.GridInteractionLayer]) -> None:
coupled_qubit_pairs = _coupled_qubit_pairs(qubits)
for i, moment in enumerate(moments):
active_pairs = set()
for op in moment:
# Operation is two-qubit
assert cirq.num_qubits(op) == 2
# Operation fits pattern
assert op.qubits in pattern[i % len(pattern)]
active_pairs.add(op.qubits)
# All interactions that should be in this layer are present
assert all(pair in active_pairs
for pair in coupled_qubit_pairs
if pair in pattern[i % len(pattern)])
def _coupled_qubit_pairs(qubits: Set['cirq.GridQubit'],
) -> List[Tuple['cirq.GridQubit', 'cirq.GridQubit']]:
pairs = []
for qubit in qubits:
def add_pair(neighbor: 'cirq.GridQubit'):
if neighbor in qubits:
pairs.append((qubit, neighbor))
add_pair(cirq.GridQubit(qubit.row, qubit.col + 1))
add_pair(cirq.GridQubit(qubit.row + 1, qubit.col))
return pairs
```
#### File: google/devices/serializable_device.py
```python
from typing import (Callable, cast, Dict, Iterable, Optional, List, Set, Tuple,
Type, TYPE_CHECKING, FrozenSet)
from cirq import circuits, devices
from cirq.google import serializable_gate_set
from cirq.google.api import v2
from cirq.value import Duration
if TYPE_CHECKING:
import cirq
class _GateDefinition:
"""Class for keeping track of gate definitions within SerializableDevice"""
def __init__(
self,
duration: 'cirq.DURATION_LIKE',
target_set: Set[Tuple['cirq.Qid', ...]],
number_of_qubits: int,
is_permutation: bool,
can_serialize_predicate: Callable[['cirq.Operation'], bool] = lambda
x: True,
):
self.duration = Duration(duration)
self.target_set = target_set
self.is_permutation = is_permutation
self.number_of_qubits = number_of_qubits
self.can_serialize_predicate = can_serialize_predicate
# Compute the set of all qubits in all target sets.
self.flattened_qubits = {
q for qubit_tuple in target_set for q in qubit_tuple
}
def with_can_serialize_predicate(
self, can_serialize_predicate: Callable[['cirq.Operation'], bool]
) -> '_GateDefinition':
"""Creates a new _GateDefintion as a copy of the existing definition
but with a new with_can_serialize_predicate. This is useful if multiple
definitions exist for the same gate, but with different conditions.
An example is if gates at certain angles of a gate take longer or are
not allowed.
"""
return _GateDefinition(
self.duration,
self.target_set,
self.number_of_qubits,
self.is_permutation,
can_serialize_predicate,
)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.__dict__ == other.__dict__
class SerializableDevice(devices.Device):
"""Device object generated from a device specification proto.
Given a device specification proto and a gate_set to translate the
serialized gate_ids to cirq Gates, this will generate a Device that can
verify operations and circuits for the hardware specified by the device.
Expected usage is through constructing this class through a proto using
the static function call from_proto().
This class only supports GridQubits and NamedQubits. NamedQubits with names
that conflict (such as "4_3") may be converted to GridQubits on
deserialization.
"""
def __init__(
self, qubits: List['cirq.Qid'],
gate_definitions: Dict[Type['cirq.Gate'], List[_GateDefinition]]):
"""Constructor for SerializableDevice using python objects.
Note that the preferred method of constructing this object is through
the static from_proto() call.
Args:
qubits: A list of valid Qid for the device.
gate_definitions: Maps cirq gates to device properties for that
gate.
"""
self.qubits = qubits
self.gate_definitions = gate_definitions
def qubit_set(self) -> FrozenSet['cirq.Qid']:
return frozenset(self.qubits)
@classmethod
def from_proto(
cls, proto: v2.device_pb2.DeviceSpecification,
gate_sets: Iterable[serializable_gate_set.SerializableGateSet]
) -> 'SerializableDevice':
"""
Args:
proto: A proto describing the qubits on the device, as well as the
supported gates and timing information.
gate_set: A SerializableGateSet that can translate the gate_ids
into cirq Gates.
"""
        # Store target sets, since they are referred to by name later
allowed_targets: Dict[str, Set[Tuple['cirq.Qid', ...]]] = {}
permutation_ids: Set[str] = set()
for ts in proto.valid_targets:
allowed_targets[ts.name] = cls._create_target_set(ts)
if ts.target_ordering == v2.device_pb2.TargetSet.SUBSET_PERMUTATION:
permutation_ids.add(ts.name)
# Store gate definitions from proto
gate_definitions: Dict[str, _GateDefinition] = {}
for gs in proto.valid_gate_sets:
for gate_def in gs.valid_gates:
# Combine all valid targets in the gate's listed target sets
gate_target_set = {
target for ts_name in gate_def.valid_targets
for target in allowed_targets[ts_name]
}
which_are_permutations = [
t in permutation_ids for t in gate_def.valid_targets
]
is_permutation = any(which_are_permutations)
if is_permutation:
if not all(which_are_permutations):
raise NotImplementedError(
f'Id {gate_def.id} in {gs.name} mixes '
'SUBSET_PERMUTATION with other types which is not '
'currently allowed.')
gate_definitions[gate_def.id] = _GateDefinition(
duration=Duration(picos=gate_def.gate_duration_picos),
target_set=gate_target_set,
is_permutation=is_permutation,
number_of_qubits=gate_def.number_of_qubits)
# Loop through serializers and map gate_definitions to type
gates_by_type: Dict[Type['cirq.Gate'], List[_GateDefinition]] = {}
for gate_set in gate_sets:
for gate_type in gate_set.supported_gate_types():
for serializer in gate_set.serializers[gate_type]:
gate_id = serializer.serialized_gate_id
if gate_id not in gate_definitions:
raise ValueError(
f'Serializer has {gate_id} which is not supported '
'by the device specification')
if gate_type not in gates_by_type:
gates_by_type[gate_type] = []
gate_def = gate_definitions[
gate_id].with_can_serialize_predicate(
serializer.can_serialize_predicate)
gates_by_type[gate_type].append(gate_def)
return SerializableDevice(
qubits=[cls._qid_from_str(q) for q in proto.valid_qubits],
gate_definitions=gates_by_type,
)
@staticmethod
def _qid_from_str(id_str: str) -> 'cirq.Qid':
"""Translates a qubit id string info cirq.Qid objects.
Tries to translate to GridQubit if possible (e.g. '4_3'), otherwise
falls back to using NamedQubit.
"""
try:
return v2.grid_qubit_from_proto_id(id_str)
except ValueError:
return v2.named_qubit_from_proto_id(id_str)
@classmethod
def _create_target_set(cls, ts: v2.device_pb2.TargetSet
) -> Set[Tuple['cirq.Qid', ...]]:
"""Transform a TargetSet proto into a set of qubit tuples"""
target_set = set()
for target in ts.targets:
qid_tuple = tuple(cls._qid_from_str(q) for q in target.ids)
target_set.add(qid_tuple)
if ts.target_ordering == v2.device_pb2.TargetSet.SYMMETRIC:
target_set.add(qid_tuple[::-1])
return target_set
def __str__(self) -> str:
# If all qubits are grid qubits, render an appropriate text diagram.
if all(isinstance(q, devices.GridQubit) for q in self.qubits):
diagram = circuits.TextDiagramDrawer()
qubits = cast(List['cirq.GridQubit'], self.qubits)
            # Don't print out extra newlines if the row/col doesn't start at 0
min_col = min(q.col for q in qubits)
min_row = min(q.row for q in qubits)
for q in qubits:
diagram.write(q.col - min_col, q.row - min_row, str(q))
# Find pairs that are connected by two-qubit gates.
Pair = Tuple['cirq.GridQubit', 'cirq.GridQubit']
pairs = {
cast(Pair, pair)
for gate_defs in self.gate_definitions.values()
for gate_def in gate_defs if gate_def.number_of_qubits == 2
for pair in gate_def.target_set if len(pair) == 2
}
# Draw lines between connected pairs. Limit to horizontal/vertical
# lines since that is all the diagram drawer can handle.
for q1, q2 in sorted(pairs):
if q1.row == q2.row or q1.col == q2.col:
diagram.grid_line(q1.col - min_col, q1.row - min_row,
q2.col - min_col, q2.row - min_row)
return diagram.render(horizontal_spacing=3,
vertical_spacing=2,
use_unicode_characters=True)
return super().__str__()
def _find_operation_type(self,
op: 'cirq.Operation') -> Optional[_GateDefinition]:
"""Finds the type (or a compatible type) of an operation from within
a dictionary with keys of Gate type.
Returns:
the value corresponding to that key or None if no type matches
"""
for type_key, gate_defs in self.gate_definitions.items():
if isinstance(op.gate, type_key):
for gate_def in gate_defs:
if gate_def.can_serialize_predicate(op):
return gate_def
return None
def duration_of(self, operation: 'cirq.Operation') -> Duration:
gate_def = self._find_operation_type(operation)
if gate_def is None:
raise ValueError(
f'Operation {operation} does not have a known duration')
return gate_def.duration
def validate_operation(self, operation: 'cirq.Operation') -> None:
for q in operation.qubits:
if q not in self.qubits:
raise ValueError('Qubit not on device: {!r}'.format(q))
gate_def = self._find_operation_type(operation)
if gate_def is None:
raise ValueError(f'{operation} is not a supported gate')
req_num_qubits = gate_def.number_of_qubits
if req_num_qubits > 0:
if len(operation.qubits) != req_num_qubits:
raise ValueError(f'{operation} has {len(operation.qubits)} '
f'qubits but expected {req_num_qubits}')
if gate_def.is_permutation:
# A permutation gate can have any combination of qubits
if not gate_def.target_set:
# All qubits are valid
return
if not all(
q in gate_def.flattened_qubits for q in operation.qubits):
raise ValueError(
                    f'Operation does not use valid qubits: {operation}.')
return
if len(operation.qubits) > 1:
# TODO(dstrain): verify args
if not gate_def.target_set:
# All qubit combinations are valid
return
qubit_tuple = tuple(operation.qubits)
if qubit_tuple not in gate_def.target_set:
# Target is not within the target sets specified by the gate.
raise ValueError(
f'Operation does not use valid qubit target: {operation}.')
```
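For orientation, a minimal usage sketch of the device class defined above. The device specification proto and gate set are placeholders assumed to exist in the caller's code; only methods shown in this file are used:

```python
import cirq

# `device_spec_proto` is a v2.device_pb2.DeviceSpecification and
# `MY_GATE_SET` a SerializableGateSet -- both hypothetical placeholders here.
device = SerializableDevice.from_proto(proto=device_spec_proto,
                                       gate_sets=[MY_GATE_SET])
op = (cirq.X**0.5).on(cirq.GridQubit(0, 0))
device.validate_operation(op)   # raises ValueError if the op is unsupported
print(device.duration_of(op))   # Duration taken from the proto definition
```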
#### File: google/engine/engine_client_test.py
```python
from unittest import mock
import pytest
from google.api_core import exceptions
from cirq.google.engine.engine_client import EngineClient, EngineException
from cirq.google.engine.client import quantum
from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes
def setup_mock_(client_constructor):
grpc_client = mock.Mock()
client_constructor.return_value = grpc_client
return grpc_client
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_create_program(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.create_quantum_program.return_value = result
code = qtypes.any_pb2.Any()
labels = {'hello': 'world'}
client = EngineClient()
assert client.create_program('proj', 'prog', code, 'A program',
labels) == ('prog', result)
assert grpc_client.create_quantum_program.call_args[0] == (
'projects/proj',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
code=code,
description='A program',
labels=labels), False)
assert client.create_program('proj', 'prog', code,
'A program') == ('prog', result)
assert grpc_client.create_quantum_program.call_args[0] == (
'projects/proj',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
code=code,
description='A program'), False)
assert client.create_program('proj', 'prog', code,
labels=labels) == ('prog', result)
assert grpc_client.create_quantum_program.call_args[0] == (
'projects/proj',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
code=code,
labels=labels), False)
assert client.create_program('proj', 'prog', code) == ('prog', result)
assert grpc_client.create_quantum_program.call_args[0] == (
'projects/proj',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
code=code), False)
assert client.create_program('proj', program_id=None,
code=code) == ('prog', result)
assert grpc_client.create_quantum_program.call_args[0] == (
'projects/proj', qtypes.QuantumProgram(code=code), False)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_program(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.get_quantum_program.return_value = result
client = EngineClient()
assert client.get_program('proj', 'prog', False) == result
assert grpc_client.get_quantum_program.call_args[0] == (
'projects/proj/programs/prog', False)
assert client.get_program('proj', 'prog', True) == result
assert grpc_client.get_quantum_program.call_args[0] == (
'projects/proj/programs/prog', True)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_set_program_description(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.update_quantum_program.return_value = result
client = EngineClient()
assert client.set_program_description('proj', 'prog', 'A program') == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
description='A program'),
qtypes.field_mask_pb2.FieldMask(paths=['description']))
assert client.set_program_description('proj', 'prog', '') == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog'),
qtypes.field_mask_pb2.FieldMask(paths=['description']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_set_program_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_program.return_value = qtypes.QuantumProgram(
labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.update_quantum_program.return_value = result
client = EngineClient()
labels = {'hello': 'world', 'color': 'blue', 'run': '1'}
assert client.set_program_labels('proj', 'prog', labels) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
labels=labels,
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.set_program_labels('proj', 'prog', {}) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_add_program_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
existing = qtypes.QuantumProgram(labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
grpc_client.get_quantum_program.return_value = existing
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.update_quantum_program.return_value = result
client = EngineClient()
assert client.add_program_labels('proj', 'prog',
{'color': 'red'}) == existing
assert grpc_client.update_quantum_program.call_count == 0
assert client.add_program_labels('proj', 'prog',
{'hello': 'world'}) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
labels={
'color': 'red',
'weather': 'sun',
'run': '1',
'hello': 'world'
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.add_program_labels('proj', 'prog', {
'hello': 'world',
'color': 'blue'
}) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
labels={
'color': 'blue',
'weather': 'sun',
'run': '1',
'hello': 'world'
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_remove_program_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
existing = qtypes.QuantumProgram(labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
grpc_client.get_quantum_program.return_value = existing
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.update_quantum_program.return_value = result
client = EngineClient()
assert client.remove_program_labels('proj', 'prog', ['other']) == existing
assert grpc_client.update_quantum_program.call_count == 0
assert client.remove_program_labels('proj', 'prog',
['hello', 'weather']) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
labels={
'color': 'red',
'run': '1',
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.remove_program_labels('proj', 'prog',
['color', 'weather', 'run']) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_delete_program(client_constructor):
grpc_client = setup_mock_(client_constructor)
client = EngineClient()
assert not client.delete_program('proj', 'prog')
assert grpc_client.delete_quantum_program.call_args[0] == (
'projects/proj/programs/prog', False)
assert not client.delete_program('proj', 'prog', delete_jobs=True)
assert grpc_client.delete_quantum_program.call_args[0] == (
'projects/proj/programs/prog', True)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_create_job(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.create_quantum_job.return_value = result
run_context = qtypes.any_pb2.Any()
labels = {'hello': 'world'}
client = EngineClient()
assert client.create_job('proj', 'prog', 'job0', ['processor0'],
run_context, 10, 'A job',
labels) == ('job0', result)
assert grpc_client.create_quantum_job.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumJob(
name='projects/proj/programs/prog/jobs/job0',
run_context=run_context,
scheduling_config=qtypes.SchedulingConfig(
priority=10,
processor_selector=qtypes.SchedulingConfig.ProcessorSelector(
processor_names=['projects/proj/processors/processor0'])),
description='A job',
labels=labels), False)
assert client.create_job(
'proj',
'prog',
'job0',
['processor0'],
run_context,
10,
'A job',
) == ('job0', result)
assert grpc_client.create_quantum_job.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumJob(
name='projects/proj/programs/prog/jobs/job0',
run_context=run_context,
scheduling_config=qtypes.SchedulingConfig(
priority=10,
processor_selector=qtypes.SchedulingConfig.ProcessorSelector(
processor_names=['projects/proj/processors/processor0'])),
description='A job'), False)
assert client.create_job('proj',
'prog',
'job0', ['processor0'],
run_context,
10,
labels=labels) == ('job0', result)
assert grpc_client.create_quantum_job.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumJob(
name='projects/proj/programs/prog/jobs/job0',
run_context=run_context,
scheduling_config=qtypes.SchedulingConfig(
priority=10,
processor_selector=qtypes.SchedulingConfig.ProcessorSelector(
processor_names=['projects/proj/processors/processor0'])),
labels=labels), False)
assert client.create_job('proj', 'prog', 'job0', ['processor0'],
run_context, 10) == ('job0', result)
assert grpc_client.create_quantum_job.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumJob(
name='projects/proj/programs/prog/jobs/job0',
run_context=run_context,
scheduling_config=qtypes.SchedulingConfig(
priority=10,
processor_selector=qtypes.SchedulingConfig.ProcessorSelector(
processor_names=['projects/proj/processors/processor0'])),
), False)
assert client.create_job('proj',
'prog',
job_id=None,
processor_ids=['processor0'],
run_context=run_context,
priority=10) == ('job0', result)
assert grpc_client.create_quantum_job.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumJob(
run_context=run_context,
scheduling_config=qtypes.SchedulingConfig(
priority=10,
processor_selector=qtypes.SchedulingConfig.ProcessorSelector(
processor_names=['projects/proj/processors/processor0'])),
), False)
with pytest.raises(ValueError, match='priority must be between 0 and 1000'):
client.create_job('proj',
'prog',
job_id=None,
processor_ids=['processor0'],
run_context=run_context,
priority=5000)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_job(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.get_quantum_job.return_value = result
client = EngineClient()
assert client.get_job('proj', 'prog', 'job0', False) == result
assert grpc_client.get_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0', False)
assert client.get_job('proj', 'prog', 'job0', True) == result
assert grpc_client.get_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0', True)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_set_job_description(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.update_quantum_job.return_value = result
client = EngineClient()
assert client.set_job_description('proj', 'prog', 'job0', 'A job') == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
description='A job'),
qtypes.field_mask_pb2.FieldMask(paths=['description']))
assert client.set_job_description('proj', 'prog', 'job0', '') == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0'),
qtypes.field_mask_pb2.FieldMask(paths=['description']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_set_job_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_job.return_value = qtypes.QuantumJob(
labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.update_quantum_job.return_value = result
client = EngineClient()
labels = {'hello': 'world', 'color': 'blue', 'run': '1'}
assert client.set_job_labels('proj', 'prog', 'job0', labels) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
labels=labels,
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.set_job_labels('proj', 'prog', 'job0', {}) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_add_job_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
existing = qtypes.QuantumJob(labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
grpc_client.get_quantum_job.return_value = existing
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.update_quantum_job.return_value = result
client = EngineClient()
assert client.add_job_labels('proj', 'prog', 'job0',
{'color': 'red'}) == existing
assert grpc_client.update_quantum_job.call_count == 0
assert client.add_job_labels('proj', 'prog', 'job0',
{'hello': 'world'}) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
labels={
'color': 'red',
'weather': 'sun',
'run': '1',
'hello': 'world'
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.add_job_labels('proj', 'prog', 'job0', {
'hello': 'world',
'color': 'blue'
}) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
labels={
'color': 'blue',
'weather': 'sun',
'run': '1',
'hello': 'world'
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_remove_job_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
existing = qtypes.QuantumJob(labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
grpc_client.get_quantum_job.return_value = existing
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.update_quantum_job.return_value = result
client = EngineClient()
assert client.remove_job_labels('proj', 'prog', 'job0',
['other']) == existing
    assert grpc_client.update_quantum_job.call_count == 0
assert client.remove_job_labels('proj', 'prog', 'job0',
['hello', 'weather']) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
labels={
'color': 'red',
'run': '1',
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.remove_job_labels('proj', 'prog', 'job0',
['color', 'weather', 'run']) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_delete_job(client_constructor):
grpc_client = setup_mock_(client_constructor)
client = EngineClient()
assert not client.delete_job('proj', 'prog', 'job0')
assert grpc_client.delete_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_cancel_job(client_constructor):
grpc_client = setup_mock_(client_constructor)
client = EngineClient()
assert not client.cancel_job('proj', 'prog', 'job0')
assert grpc_client.cancel_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_job_results(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumResult(
parent='projects/proj/programs/prog/jobs/job0')
grpc_client.get_quantum_result.return_value = result
client = EngineClient()
assert client.get_job_results('proj', 'prog', 'job0') == result
assert grpc_client.get_quantum_result.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_processors(client_constructor):
grpc_client = setup_mock_(client_constructor)
results = [
qtypes.QuantumProcessor(name='projects/proj/processor/processor0'),
qtypes.QuantumProcessor(name='projects/proj/processor/processor1')
]
grpc_client.list_quantum_processors.return_value = results
client = EngineClient()
assert client.list_processors('proj') == results
assert grpc_client.list_quantum_processors.call_args[0] == ('projects/proj',
'')
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_processor(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumProcessor(name='projects/proj/processors/processor0')
grpc_client.get_quantum_processor.return_value = result
client = EngineClient()
assert client.get_processor('proj', 'processor0') == result
assert grpc_client.get_quantum_processor.call_args[0] == (
'projects/proj/processors/processor0',)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_calibrations(client_constructor):
grpc_client = setup_mock_(client_constructor)
results = [
qtypes.QuantumCalibration(
name='projects/proj/processor/processor0/calibrations/123456'),
qtypes.QuantumCalibration(
name='projects/proj/processor/processor1/calibrations/224466')
]
grpc_client.list_quantum_calibrations.return_value = results
client = EngineClient()
assert client.list_calibrations('proj', 'processor0') == results
assert grpc_client.list_quantum_calibrations.call_args[0] == (
'projects/proj/processors/processor0', '')
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_calibration(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumCalibration(
name='projects/proj/processors/processor0/calibrations/123456')
grpc_client.get_quantum_calibration.return_value = result
client = EngineClient()
assert client.get_calibration('proj', 'processor0', 123456) == result
assert grpc_client.get_quantum_calibration.call_args[0] == (
'projects/proj/processors/processor0/calibrations/123456',)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_current_calibration(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumCalibration(
name='projects/proj/processors/processor0/calibrations/123456')
grpc_client.get_quantum_calibration.return_value = result
client = EngineClient()
assert client.get_current_calibration('proj', 'processor0') == result
assert grpc_client.get_quantum_calibration.call_args[0] == (
'projects/proj/processors/processor0/calibrations/current',)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_current_calibration_does_not_exist(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_calibration.side_effect = exceptions.NotFound(
'not found')
client = EngineClient()
assert client.get_current_calibration('proj', 'processor0') is None
assert grpc_client.get_quantum_calibration.call_args[0] == (
'projects/proj/processors/processor0/calibrations/current',)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_current_calibration_error(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_calibration.side_effect = exceptions.BadRequest(
'boom')
client = EngineClient()
with pytest.raises(EngineException, match='boom'):
client.get_current_calibration('proj', 'processor0')
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_api_doesnt_retry_not_found_errors(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_program.side_effect = exceptions.NotFound(
'not found')
client = EngineClient()
with pytest.raises(EngineException, match='not found'):
client.get_program('proj', 'prog', False)
assert grpc_client.get_quantum_program.call_count == 1
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_api_retry_5xx_errors(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_program.side_effect = exceptions.ServiceUnavailable(
'internal error')
client = EngineClient(max_retry_delay_seconds=1)
with pytest.raises(TimeoutError,
match='Reached max retry attempts.*internal error'):
client.get_program('proj', 'prog', False)
assert grpc_client.get_quantum_program.call_count > 1
```
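Read together, these tests fix the EngineClient call signatures. A hypothetical end-to-end call sequence would look roughly like this; the project and processor ids are placeholders, and `code` / `run_context` are `any_pb2.Any` payloads prepared elsewhere, as in the tests above:

```python
# `code` and `run_context` are serialized any_pb2.Any payloads (placeholders).
client = EngineClient()
program_id, _ = client.create_program('my-project', 'my-program', code)
job_id, _ = client.create_job('my-project', program_id, 'job0',
                              ['processor0'], run_context, 10)
results = client.get_job_results('my-project', program_id, job_id)
```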
#### File: google/engine/engine_timeslot_test.py
```python
import datetime
import cirq
import cirq.google as cg
from cirq.google.engine.client.quantum_v1alpha1.gapic import enums
def test_timeslot_equality():
start = datetime.datetime.fromtimestamp(1582592400)
end = datetime.datetime.fromtimestamp(1582596000)
eq = cirq.testing.equals_tester.EqualsTester()
eq.add_equality_group(
cg.EngineTimeSlot(start_time=start, end_time=end),
cg.EngineTimeSlot(start_time=start, end_time=end),
cg.EngineTimeSlot(start_time=start,
end_time=end,
slot_type=enums.QuantumTimeSlot.TimeSlotType.
TIME_SLOT_TYPE_UNSPECIFIED))
eq.add_equality_group(
cg.EngineTimeSlot(start_time=start, end_time=end, project_id='123456'))
eq.add_equality_group(
cg.EngineTimeSlot(
start_time=start,
end_time=end,
slot_type=enums.QuantumTimeSlot.TimeSlotType.RESERVATION,
project_id='123456'))
eq.add_equality_group(
cg.EngineTimeSlot(
start_time=start,
end_time=end,
slot_type=enums.QuantumTimeSlot.TimeSlotType.MAINTENANCE,
project_id='123456'))
eq.add_equality_group(
cg.EngineTimeSlot(
start_time=start,
end_time=end,
slot_type=enums.QuantumTimeSlot.TimeSlotType.MAINTENANCE,
project_id='123456',
maintenance_title="Testing",
maintenance_description="Testing some new configuration."))
```
#### File: cirq/ops/named_qubit.py
```python
from cirq import protocols
from cirq.ops import raw_types
class NamedQubit(raw_types.Qid):
"""A qubit identified by name.
By default, NamedQubit has a lexicographic order. However, numbers within
the name are handled correctly. So, for example, if you print a circuit
containing `cirq.NamedQubit('qubit22')` and `cirq.NamedQubit('qubit3')`, the
wire for 'qubit3' will correctly come before 'qubit22'.
"""
def __init__(self, name: str) -> None:
self._name = name
self._comp_key = _pad_digits(name)
def _comparison_key(self):
return self._comp_key
@property
def dimension(self) -> int:
return 2
def __str__(self):
return self._name
@property
def name(self):
return self._name
def __repr__(self):
return 'cirq.NamedQubit({})'.format(repr(self._name))
@staticmethod
def range(*args, prefix: str):
"""Returns a range of NamedQubits.
        Each returned qubit is named with the prefix followed by a number from
        the range, e.g.:
            NamedQubit.range(3, prefix="a") -> ["a0", "a1", "a2"]
            NamedQubit.range(2, 4, prefix="a") -> ["a2", "a3"]
Args:
*args: Args to be passed to Python's standard range function.
prefix: A prefix for constructed NamedQubits.
Returns:
A list of NamedQubits.
"""
return [NamedQubit(prefix + str(i)) for i in range(*args)]
def _json_dict_(self):
return protocols.obj_to_dict_helper(self, ['name'])
def _pad_digits(text: str) -> str:
"""A str method with hacks to support better lexicographic ordering.
The output strings are not intended to be human readable.
The returned string will have digit-runs zero-padded up to at least 8
digits. That way, instead of 'a10' coming before 'a2', 'a000010' will come
after 'a000002'.
Also, the original length of each digit-run is appended after the
zero-padded run. This is so that 'a0' continues to come before 'a00'.
"""
was_on_digits = False
last_transition = 0
chunks = []
def handle_transition_at(k):
chunk = text[last_transition:k]
if was_on_digits:
chunk = chunk.rjust(8, '0') + ':' + str(len(chunk))
chunks.append(chunk)
for i in range(len(text)):
on_digits = text[i].isdigit()
if was_on_digits != on_digits:
handle_transition_at(i)
was_on_digits = on_digits
last_transition = i
handle_transition_at(len(text))
return ''.join(chunks)
```
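A short sketch of the ordering behaviour `_pad_digits` exists to provide, using only the class defined above:

```python
qubits = [NamedQubit('qubit22'), NamedQubit('qubit3')]
# Digit runs are compared numerically, so 'qubit3' sorts before 'qubit22'.
assert sorted(qubits) == [NamedQubit('qubit3'), NamedQubit('qubit22')]
# The range helper builds prefixed, numbered qubits:
assert NamedQubit.range(3, prefix='a') == [NamedQubit('a0'),
                                           NamedQubit('a1'),
                                           NamedQubit('a2')]
```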
#### File: cirq/optimizers/synchronize_terminal_measurements.py
```python
from typing import List, Set, Tuple, cast
from cirq import circuits, ops, protocols
class SynchronizeTerminalMeasurements():
"""Move measurements to the end of the circuit.
    Move all measurements in a circuit to the final moment if it can accommodate
them (without overlapping with other operations). If
self._after_other_operations is true then a new moment will be added to the
end of the circuit containing all the measurements that should be brought
forward.
"""
def __init__(self, after_other_operations: bool = True):
"""
Args:
after_other_operations: Set by default. If the circuit's final
moment contains non-measurement operations and this is set then
a new empty moment is appended to the circuit before pushing
measurements to the end.
"""
self._after_other_operations = after_other_operations
def __call__(self, circuit: circuits.Circuit):
self.optimize_circuit(circuit)
def optimize_circuit(self, circuit: circuits.Circuit) -> None:
deletions: List[Tuple[int, ops.Operation]] = []
terminal_measures: Set[ops.Operation] = set()
qubits = circuit.all_qubits()
for qubit in qubits:
moment_index = cast(int, circuit.prev_moment_operating_on((qubit,)))
op = cast(ops.Operation, circuit.operation_at(qubit, moment_index))
if protocols.is_measurement(op):
deletions.append((moment_index, op))
terminal_measures.add(op)
if not deletions:
return
circuit.batch_remove(deletions)
if circuit[-1] and self._after_other_operations:
            # The final moment still contains non-measurement operations, so
            # append a fresh empty moment to receive the measurements.
circuit.append(ops.Moment())
for op in terminal_measures:
circuit[-1] = circuit[-1].with_operation(op)
```
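A minimal sketch of the optimizer in action (hypothetical two-qubit circuit; uses only the public cirq API plus the class above):

```python
import cirq

q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit([
    cirq.Moment([cirq.measure(q0), cirq.X(q1)]),
    cirq.Moment([cirq.X(q1)]),
])
SynchronizeTerminalMeasurements()(circuit)
# The measurement on q0 has been moved out of the first moment and now sits
# in a newly appended final moment, after the other operations.
```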
#### File: cirq/protocols/has_unitary_protocol_test.py
```python
import numpy as np
import cirq
def test_inconclusive():
class No:
pass
assert not cirq.has_unitary(object())
assert not cirq.has_unitary('boo')
assert not cirq.has_unitary(No())
def test_via_unitary():
class No1:
def _unitary_(self):
return NotImplemented
class No2:
def _unitary_(self):
return None
class Yes:
def _unitary_(self):
return np.array([[1]])
assert not cirq.has_unitary(No1())
assert not cirq.has_unitary(No2())
assert cirq.has_unitary(Yes())
def test_via_apply_unitary():
class No1(EmptyOp):
def _apply_unitary_(self, args):
return None
class No2(EmptyOp):
def _apply_unitary_(self, args):
return NotImplemented
class No3(cirq.SingleQubitGate):
def _apply_unitary_(self, args):
return NotImplemented
class No4: # A non-operation non-gate.
def _apply_unitary_(self, args):
assert False # Because has_unitary doesn't understand how to call.
class Yes1(EmptyOp):
def _apply_unitary_(self, args):
return args.target_tensor
class Yes2(cirq.SingleQubitGate):
def _apply_unitary_(self, args):
return args.target_tensor
assert cirq.has_unitary(Yes1())
assert cirq.has_unitary(Yes2())
assert not cirq.has_unitary(No1())
assert not cirq.has_unitary(No2())
assert not cirq.has_unitary(No3())
assert not cirq.has_unitary(No4())
def test_via_decompose():
class Yes1:
def _decompose_(self):
return []
class Yes2:
def _decompose_(self):
return [cirq.X(cirq.LineQubit(0))]
class No1:
def _decompose_(self):
return [cirq.depolarize(0.5).on(cirq.LineQubit(0))]
class No2:
def _decompose_(self):
return None
class No3:
def _decompose_(self):
return NotImplemented
assert cirq.has_unitary(Yes1())
assert cirq.has_unitary(Yes2())
assert not cirq.has_unitary(No1())
assert not cirq.has_unitary(No2())
assert not cirq.has_unitary(No3())
def test_via_has_unitary():
class No1:
def _has_unitary_(self):
return NotImplemented
class No2:
def _has_unitary_(self):
return False
class Yes:
def _has_unitary_(self):
return True
assert not cirq.has_unitary(No1())
assert not cirq.has_unitary(No2())
assert cirq.has_unitary(Yes())
def test_order():
class Yes1(EmptyOp):
def _has_unitary_(self):
return True
def _decompose_(self):
assert False
def _apply_unitary_(self, args):
assert False
def _unitary_(self):
assert False
class Yes2(EmptyOp):
def _has_unitary_(self):
return NotImplemented
def _decompose_(self):
return []
def _apply_unitary_(self, args):
assert False
def _unitary_(self):
assert False
class Yes3(EmptyOp):
def _has_unitary_(self):
return NotImplemented
def _decompose_(self):
return NotImplemented
def _apply_unitary_(self, args):
return args.target_tensor
def _unitary_(self):
assert False
class Yes4(EmptyOp):
def _has_unitary_(self):
return NotImplemented
def _decompose_(self):
return NotImplemented
def _apply_unitary_(self, args):
return NotImplemented
def _unitary_(self):
return np.array([[1]])
assert cirq.has_unitary(Yes1())
assert cirq.has_unitary(Yes2())
assert cirq.has_unitary(Yes3())
assert cirq.has_unitary(Yes4())
class EmptyOp(cirq.Operation):
"""A trivial operation that will be recognized as `_apply_unitary_`-able."""
@property
def qubits(self):
# coverage: ignore
return ()
def with_qubits(self, *new_qubits):
# coverage: ignore
return self
``` |
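The `test_order` cases above encode the strategy order `cirq.has_unitary` works through: `_has_unitary_`, then `_decompose_`, then `_apply_unitary_`, then `_unitary_`. A custom object only needs to answer one of them; a hypothetical example in the style of `test_via_unitary`:

```python
import numpy as np
import cirq

class Beamsplitter:
    """Hypothetical object: supplying _unitary_ alone is sufficient."""
    def _unitary_(self):
        return np.array([[1, 1], [1, -1]]) / np.sqrt(2)

assert cirq.has_unitary(Beamsplitter())
```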
{
"source": "joshpark8/pybench",
"score": 2
} |
#### File: pybench/pybench/core.py
```python
from __future__ import annotations
from typing import List, Callable, Any, TypeVar
# standard libraries
import sys
import socket
import logging
from time import sleep
from timeit import default_timer
from threading import Thread
from abc import ABC, abstractmethod, abstractproperty
# external libs
import psutil
# public interface
__all__ = ['Benchmark', 'BenchmarkError', 'CPUResource', 'MemoryResource', 'coerce_type', ]
HOSTNAME = socket.gethostname()
class LogRecord(logging.LogRecord):
"""Extends `logging.LogRecord` to include a hostname."""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.hostname = HOSTNAME
logging.setLogRecordFactory(LogRecord)
logging.basicConfig(level=logging.INFO, stream=sys.stdout,
format='%(asctime)s.%(msecs)03d %(hostname)s %(name)s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
class BenchmarkError(Exception):
"""Error in setup or running benchmark."""
class Benchmark(ABC):
"""Boilerplate for running a benchmark."""
log: Callable[[str], None]
args: List[Any]
repeat: int
spacing: float
annotation: str = '()'
def __init__(self, repeat: int = 1, spacing: float = 1.0, *args) -> None:
"""Initialize parameters."""
self.log = logging.getLogger(f'benchmark.{self.name}').info
self.args = list(args)
self.repeat = int(repeat)
self.spacing = float(spacing)
self.prepare()
@abstractproperty
def name(self) -> str:
"""The name of the benchmark."""
def setup(self, *args) -> None:
"""Initialize state or member data before run."""
self.args = list(args)
def prepare(self) -> None:
"""Wraps call to setup."""
try:
self.setup(*self.args)
except Exception as error:
raise BenchmarkError(f'setup for \'{self.name}\': {error}') from error
@abstractmethod
def task(self) -> None:
"""The task to be executed."""
def run(self) -> None:
"""Run benchmark some number of times."""
for i in range(1, self.repeat + 1):
self.prepare()
self.log(f'[{i}] start')
time = default_timer()
self.task()
elapsed = default_timer() - time
self.log(f'[{i}] {elapsed}')
sleep(self.spacing)
class Resource(Thread):
"""Monitor resource and log usage."""
log: Callable[[str], None]
resolution: float = 1.0
def __init__(self, resolution: float = resolution) -> None:
"""Initialize parameters."""
super().__init__(name=self.name, daemon=True)
self.resolution = resolution
self.log = logging.getLogger(f'resource.{self.name}').info
@abstractproperty
def name(self) -> str:
"""The name of the resource."""
@abstractmethod
def gather_telemetry(self) -> List[float]:
"""Return list of data points to log."""
def run(self) -> None:
"""Log telemetry."""
for data in iter(self.gather_telemetry, None):
if len(data) == 1:
metric, = data
self.log(str(metric))
else:
for i, metric in enumerate(data):
self.log(f'[{i}] {metric}')
sleep(self.resolution)
@classmethod
def new(cls, *args, **kwargs) -> Resource:
"""Initialize and start thread."""
thread = cls(*args, **kwargs)
thread.start()
return thread
class CPUResource(Resource):
"""Collect telemetry on CPU usage."""
name = 'cpu'
def gather_telemetry(self) -> List[float]:
values = psutil.cpu_percent(interval=self.resolution, percpu=True)
return [value / 100 for value in list(values)]
class MemoryResource(Resource):
"""Collect telemetry on Memory usage."""
name = 'memory'
def gather_telemetry(self) -> List[float]:
return [psutil.virtual_memory().percent / 100, ]
T = TypeVar('T', int, float, bool, type(None), str)
def coerce_type(value: str) -> T:
"""Passively coerce `value` to available type if possible."""
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
if value.lower() in ('none', 'null'):
return None
if value.lower() == 'true':
return True
if value.lower() == 'false':
return False
else:
return value
``` |
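A compact sketch of how these pieces compose: a concrete `Benchmark` subclass plus a background resource monitor. The workload here is a placeholder, and the import path assumes the package layout of the file above:

```python
from time import sleep
from pybench.core import Benchmark, CPUResource

class SleepBenchmark(Benchmark):
    """Hypothetical benchmark whose 'work' is just a short sleep."""
    name = 'sleep'
    def task(self) -> None:
        sleep(0.1)

cpu = CPUResource.new(resolution=0.5)   # daemon thread logging CPU usage
SleepBenchmark(repeat=3, spacing=0.5).run()
```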
{
"source": "josh-parris/bricklink_api",
"score": 3
} |
#### File: bricklink_api/bricklink_api/color.py
```python
from .method import method as _method
def get_color_list(**kwargs) -> dict:
return _method("GET", "/colors",
**kwargs
)
def get_color(
color_id: int,
**kwargs
) -> dict:
return _method("GET", f'/colors/{color_id}',
**kwargs
)
```
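A hypothetical call site for the two thin wrappers above; any authentication keyword arguments are simply forwarded to `_method`:

```python
colors = get_color_list()   # GET /colors
one = get_color(5)          # GET /colors/5
```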
#### File: bricklink_api/bricklink_api/item_mapping.py
```python
from .method import method as _method
from . import catalog_item as _catalog_item
def get_element_id(
type_: _catalog_item.Type,
no: str,
**kwargs
) -> dict:
return _method("GET", f'/item_mapping/{type_}/{no}',
**kwargs
)
def get_item_number(
element_id: int,
**kwargs
) -> dict:
return _method("GET", f'/item_mapping/{element_id}',
**kwargs
)
```
#### File: bricklink_api/bricklink_api/member.py
```python
from .method import method as _method
def get_member_rating(
username: str,
**kwargs
) -> dict:
return _method(
"GET", f'/members/{username}/ratings',
**kwargs
)
def get_member_note(
username: str,
**kwargs
) -> dict:
return _method(
"GET", f'/members/{username}/notes',
**kwargs
)
def create_member_note(
username: str,
note_resource: dict,
**kwargs
) -> dict:
return _method(
"POST", f'/members/{username}/notes',
json = note_resource,
**kwargs
)
def update_member_note(
username: str,
note_resource: dict,
**kwargs
) -> dict:
return _method(
"PUT", f'/members/{username}/notes',
json = note_resource,
**kwargs
)
def delete_member_note(
username: str,
**kwargs
) -> dict:
return _method(
"PUT", f'/members/{username}/notes',
**kwargs
)
```
#### File: bricklink_api/bricklink_api/message.py
```python
import enum as _enum
from .method import method as _method
class Direction(_enum.Enum):
OUT = "out"
IN = "in"
DEFAULT = "in"
def get_message_list(
*,
direction: Direction = None,
**kwargs
) -> dict:
return _method("GET", "/messages",
**kwargs
)
def get_message(
message_id: int,
**kwargs
) -> dict:
return _method("GET", f'/messages/{message_id}',
**kwargs
)
def reply_message(
message_id: int,
message_resource: dict,
**kwargs
) -> dict:
return _method("POST", f'/messages/{message_id}/reply',
json = message_resource,
**kwargs
)
``` |
{
"source": "JoshPaterson/bowditch",
"score": 2
} |
#### File: bowditch/tests/test_tables.py
```python
from bowditch.data import tables
from numpy import isclose
from pytest import raises
data_arrays = [tables.refraction_data, tables.dip_data]
def test_table_size():
for table in data_arrays:
assert len(table[0]) == len(table[1])
def test_refraction_table():
assert isclose(tables.refraction_table(35), 1.4)
assert isclose(tables.refraction_table(31), 1.7)
assert isclose(tables.refraction_table(37, interpolate=True), 1.3)
with raises(ValueError):
tables.refraction_table(10)
def test_simple_refraction_table():
assert isclose(tables.simple_refraction_table(61), 0)
assert isclose(tables.simple_refraction_table(35), 1)
assert isclose(tables.simple_refraction_table(31), 1.7)
with raises(ValueError):
tables.simple_refraction_table(10)
def test_dip_table():
assert isclose(tables.dip_table(20), 4.3)
assert isclose(tables.dip_table(74), 8.1)
assert isclose(tables.dip_table(90, interpolate=True), 9.2)
with raises(ValueError):
tables.dip_table(250)
```
#### File: JoshPaterson/bowditch/setup.py
```python
import os
from distutils.core import setup
from distutils.command.sdist import sdist
import bowditch # safe, because __init__.py contains no import statements
class my_sdist(sdist):
def make_distribution(self):
# See https://github.com/skyfielders/python-skyfield/issues/378
for path in self.filelist.files:
os.chmod(path, 0o644)
sdist.make_distribution(self)
setup(
cmdclass={'sdist': my_sdist},
name='bowditch',
version=bowditch.__version__,
description=bowditch.__doc__.split('\n', 1)[0],
long_description=open('README.md', 'rb').read().decode('utf-8'),
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
url='http://github.com/JoshPaterson/bowditch/',
packages=[
'bowditch',
'bowditch.data',
'bowditch.tests',
],
install_requires=[
'numpy',
],
)
``` |
{
"source": "JoshPaterson/python-skyfield",
"score": 2
} |
#### File: skyfield/tests/test_topos.py
```python
from assay import assert_raises
from numpy import abs, arange, sqrt
from skyfield import constants
from skyfield.api import Distance, load, wgs84, wms
from skyfield.functions import length_of
from skyfield.positionlib import Apparent, Barycentric
from skyfield.toposlib import ITRSPosition, iers2010
angle = (-15, 15, 35, 45)
def ts():
yield load.timescale()
def test_latitude_longitude_elevation_str_and_repr():
w = wgs84.latlon(36.7138, -112.2169, 2400.0)
assert str(w) == ('WGS84 latitude +36.7138 N'
' longitude -112.2169 E elevation 2400.0 m')
assert repr(w) == ('<GeographicPosition WGS84 latitude +36.7138 N'
' longitude -112.2169 E elevation 2400.0 m>')
w = wgs84.latlon([1.0, 2.0], [3.0, 4.0], [5.0, 6.0])
assert str(w) == (
'WGS84 latitude [+1.0000 +2.0000] N'
' longitude [3.0000 4.0000] E'
' elevation [5.0 6.0] m'
)
assert repr(w) == '<GeographicPosition {0}>'.format(w)
w = wgs84.latlon(arange(6.0), arange(10.0, 16.0), arange(20.0, 26.0))
assert str(w) == (
'WGS84 latitude [+0.0000 +1.0000 ... +4.0000 +5.0000] N'
' longitude [10.0000 11.0000 ... 14.0000 15.0000] E'
' elevation [20.0 21.0 ... 24.0 25.0] m'
)
assert repr(w) == '<GeographicPosition {0}>'.format(w)
def test_raw_itrs_position():
d = Distance(au=[1, 2, 3])
p = ITRSPosition(d)
ts = load.timescale()
t = ts.utc(2020, 12, 16, 12, 59)
p.at(t)
def test_wgs84_velocity_matches_actual_motion():
# It looks like this is a sweet spot for accuracy: presumably a
# short enough fraction of a second that the vector does not time to
# change direction much, but long enough that the direction does not
# get lost down in the noise.
factor = 300.0
ts = load.timescale()
t = ts.utc(2019, 11, 2, 3, 53, [0, 1.0 / factor])
jacob = wgs84.latlon(36.7138, -112.2169)
p = jacob.at(t)
velocity1 = p.position.km[:,1] - p.position.km[:,0]
velocity2 = p.velocity.km_per_s[:,0]
assert length_of(velocity2 - factor * velocity1) < 0.0007
def test_lst():
ts = load.timescale()
ts.delta_t_table = [-1e99, 1e99], [69.363285] * 2 # from finals2000A.all
t = ts.utc(2020, 11, 27, 15, 34)
top = wgs84.latlon(0.0, 0.0)
expected = 20.0336663100 # see "authorities/horizons-lst"
actual = top.lst_hours_at(t)
difference_mas = (actual - expected) * 3600 * 15 * 1e3
horizons_ra_offset_mas = 51.25
difference_mas -= horizons_ra_offset_mas
assert abs(difference_mas) < 1.0
def test_itrs_xyz_attribute_and_itrf_xyz_method():
top = wgs84.latlon(45.0, 0.0, elevation_m=constants.AU_M - constants.ERAD)
x, y, z = top.itrs_xyz.au
assert abs(x - sqrt(0.5)) < 2e-7
assert abs(y - 0.0) < 1e-14
assert abs(z - sqrt(0.5)) < 2e-7
ts = load.timescale()
t = ts.utc(2019, 11, 2, 3, 53)
x, y, z = top.at(t).itrf_xyz().au
assert abs(x - sqrt(0.5)) < 1e-4
assert abs(y - 0.0) < 1e-14
assert abs(z - sqrt(0.5)) < 1e-4
def test_polar_motion_when_computing_topos_position(ts):
xp_arcseconds = 11.0
yp_arcseconds = 22.0
ts.polar_motion_table = [0.0], [xp_arcseconds], [yp_arcseconds]
top = iers2010.latlon(wms(42, 21, 24.1), wms(-71, 3, 24.8), 43.0)
t = ts.utc(2005, 11, 12, 22, 2)
# "expected" comes from:
# from novas.compat import ter2cel
# print(ter2cel(t.whole, t.ut1_fraction, t.delta_t, xp_arcseconds,
# yp_arcseconds, top.itrs_xyz.km, method=1))
expected = (3129.530248036487, -3535.1665884086683, 4273.94957733827)
assert max(abs(top.at(t).position.km - expected)) < 3e-11
def test_polar_motion_when_computing_altaz_coordinates(ts):
latitude = 37.3414
longitude = -121.6429
elevation = 1283.0
ra_hours = 5.59
dec_degrees = -5.45
xp_arcseconds = 11.0
yp_arcseconds = 22.0
ts.polar_motion_table = [0.0], [xp_arcseconds], [yp_arcseconds]
t = ts.utc(2020, 11, 12, 22, 16)
top = wgs84.latlon(latitude, longitude, elevation)
pos = Apparent.from_radec(ra_hours, dec_degrees, epoch=t)
pos.t = t
pos.center = top
alt, az, distance = pos.altaz()
# To generate the test altitude and azimuth below:
# from novas.compat import equ2hor, make_on_surface
# location = make_on_surface(latitude, longitude, elevation, 0, 0)
# (novas_zd, novas_az), (rar, decr) = equ2hor(
# t.ut1, t.delta_t, xp_arcseconds, yp_arcseconds, location,
# ra_hours, dec_degrees, 0,
# )
# novas_alt = 90.0 - novas_zd
# print(novas_alt, novas_az)
novas_alt = -58.091983295564205
novas_az = 1.8872567543791035
assert abs(alt.degrees - novas_alt) < 1.9e-9
assert abs(az.degrees - novas_az) < 1.3e-7
def test_subpoint_with_wrong_center(ts, angle):
t = ts.utc(2020, 12, 31)
p = Barycentric([0,0,0], t=t)
with assert_raises(ValueError, 'you can only calculate a geographic'
' position from a position which is geocentric'
' .center=399., but this position has a center of 0'):
wgs84.subpoint(p)
def test_iers2010_subpoint(ts, angle):
t = ts.utc(2018, 1, 19, 14, 37, 55)
# An elevation of 0 is more difficult for the routine's accuracy
# than a very large elevation.
top = iers2010.latlon(angle, angle, elevation_m=0.0)
p = top.at(t)
b = iers2010.subpoint(p)
error_degrees = abs(b.latitude.degrees - angle)
error_mas = 60.0 * 60.0 * 1000.0 * error_degrees
assert error_mas < 0.1
error_degrees = abs(b.longitude.degrees - angle)
error_mas = 60.0 * 60.0 * 1000.0 * error_degrees
assert error_mas < 0.1
def test_wgs84_subpoint(ts, angle):
t = ts.utc(2018, 1, 19, 14, 37, 55)
# An elevation of 0 is more difficult for the routine's accuracy
# than a very large elevation.
top = wgs84.latlon(angle, angle, elevation_m=0.0)
p = top.at(t)
b = wgs84.subpoint(p)
error_degrees = abs(b.latitude.degrees - angle)
error_mas = 60.0 * 60.0 * 1000.0 * error_degrees
assert error_mas < 0.1
error_degrees = abs(b.longitude.degrees - angle)
error_mas = 60.0 * 60.0 * 1000.0 * error_degrees
assert error_mas < 0.1
def test_wgs84_round_trip_with_polar_motion(ts, angle):
t = ts.utc(2018, 1, 19, 14, 37, 55)
ts.polar_motion_table = [0.0], [0.003483], [0.358609]
top = wgs84.latlon(angle, angle, elevation_m=0.0)
p = top.at(t)
b = wgs84.subpoint(p)
error_degrees = abs(b.latitude.degrees - angle)
error_mas = 60.0 * 60.0 * 1000.0 * error_degrees
assert error_mas < 0.1
error_degrees = abs(b.longitude.degrees - angle)
error_mas = 60.0 * 60.0 * 1000.0 * error_degrees
assert error_mas < 0.1
def test_latlon_and_subpoint_methods(ts, angle):
t = ts.utc(2020, 11, 3, 17, 5)
g = wgs84.latlon(angle, 2 * angle, elevation_m=1234.0)
pos = g.at(t)
def check_lat(lat): assert abs(g.latitude.mas() - lat.mas()) < 0.1
def check_lon(lon): assert abs(g.longitude.mas() - lon.mas()) < 0.1
def check_height(h): assert abs(g.elevation.m - h.m) < 1e-7
lat, lon = wgs84.latlon_of(pos)
check_lat(lat)
check_lon(lon)
height = wgs84.height_of(pos)
check_height(height)
g = wgs84.geographic_position_of(pos)
check_lat(g.latitude)
check_lon(g.longitude)
check_height(g.elevation)
g = wgs84.subpoint(pos) # old deprecated method name
check_lat(g.latitude)
check_lon(g.longitude)
check_height(g.elevation)
g = wgs84.subpoint_of(pos)
check_lat(g.latitude)
check_lon(g.longitude)
assert g.elevation.m == 0.0
def test_deprecated_position_subpoint_method(ts, angle):
t = ts.utc(2018, 1, 19, 14, 37, 55)
top = iers2010.latlon(angle, angle, elevation_m=0.0)
b = top.at(t).subpoint()
error_degrees = abs(b.latitude.degrees - angle)
error_mas = 60.0 * 60.0 * 1000.0 * error_degrees
assert error_mas < 0.1
error_degrees = abs(b.longitude.degrees - angle)
error_mas = 60.0 * 60.0 * 1000.0 * error_degrees
assert error_mas < 0.1
``` |
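As a compact usage sketch of the API these tests exercise (coordinates are arbitrary example values):

```python
from skyfield.api import load, wgs84

ts = load.timescale()
t = ts.utc(2020, 11, 3, 17, 5)
site = wgs84.latlon(36.7138, -112.2169, elevation_m=2400.0)
geocentric = site.at(t)                        # geocentric position at time t
g = wgs84.geographic_position_of(geocentric)   # round-trip back to lat/lon
print(g.latitude.degrees, g.longitude.degrees, g.elevation.m)
```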
{
"source": "JoshPattman/Spot-Puppy-Lib",
"score": 3
} |
#### File: spotpuppy/rotation/arduino_rotation_sensor.py
```python
from . import rotation_sensor_base
import serial
class sensor(rotation_sensor_base.sensor):
def __init__(self, inverse_x=False, inverse_z=False, serial_port="/dev/ttyUSB0"):
rotation_sensor_base.sensor.__init__(self, inverse_x=inverse_x, inverse_z=inverse_z)
self.serial_port = serial_port
self.s = serial.Serial("/dev/ttyUSB0", 115200)
while self.s.readline() != b"READY\r\n":
pass
def calibrate(self):
self.s.write(b"c")
while self.s.readline() != b"READY\r\n":
pass
def update(self):
self.s.write(b"r")
rotation_string = self.s.readline().decode('utf-8').strip()
if rotation_string[0] == "D":
rotation_string = rotation_string[1:]
rotations_string = rotation_string.split(",", -1)
self.rotation[0] = float(rotations_string[0])
self.rotation[1] = float(rotations_string[1])
```
#### File: spotpuppy/rotation/mpu6050_rotation_sensor.py
```python
from math import atan, sqrt, pow, radians, degrees
import time
from . import rotation_sensor_base
IS_IMPORTED=False
class sensor(rotation_sensor_base.sensor):
def __init__(self, inverse_x=False, inverse_z=False, accelerometer_bias=0.05):
global IS_IMPORTED
if not IS_IMPORTED:
global mpu6050
from mpu6050 import mpu6050
IS_IMPORTED = True
rotation_sensor_base.sensor.__init__(self, inverse_x=inverse_x, inverse_z=inverse_z)
self.accelerometer_bias=accelerometer_bias
self.mpu = mpu6050(0x68)
self.rotation[0] = 0
self.rotation[1] = 0
self.dx = 0
self.dy = 0
self.ax = 0
self.ay = 0
self.last_update = time.time()
def update(self):
# Get gyro data
data = self.mpu.get_gyro_data()
# Find elapsed time
t = time.time()
elaplsed = t - self.last_update
self.last_update = t
# Add the rotation velocity * time
self.rotation[0] += (data['x'] - self.dx) * elaplsed
self.rotation[1] += (data['y'] - self.dy) * elaplsed
# Get accel angle
aang = self._get_acc_ang()
# Add accel angle into the actual angle (slowly introducing it to reduce noise, as it is only really used to stop gyro drift)
self.rotation[0] = (self.rotation[0] * (1 - self.accelerometer_bias)) + (aang[0] * self.accelerometer_bias)
self.rotation[1] = (self.rotation[1] * (1 - self.accelerometer_bias)) + (aang[1] * -self.accelerometer_bias)
def calibrate(self):
data1 = self.mpu.get_gyro_data()
time.sleep(0.5)
data2 = self.mpu.get_gyro_data()
time.sleep(0.5)
data3 = self.mpu.get_gyro_data()
self.dx = (data1['x'] + data2['x'] + data3['x']) / 3
self.dy = (data1['y'] + data2['y'] + data3['y']) / 3
self.ax = 0
self.ay = 0
adata = self._get_acc_ang()
self.ax = adata[0]
self.ay = adata[1]
self.rotation[0] = 0
self.rotation[1] = 0
def _get_acc_ang(self):
data = self.mpu.get_accel_data()
ax = data['y']
ay = data['x']
az = data['z']
xAngle = degrees(atan(ax / (sqrt(pow(ay, 2) + pow(az, 2)))))
yAngle = degrees(atan(ay / (sqrt(pow(ax, 2) + pow(az, 2)))))
zAngle = degrees(atan(sqrt(pow(ax, 2) + pow(ay, 2)) / az))
return [xAngle - self.ax, yAngle - self.ay]
```
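The `update` above is a complementary filter: it integrates bias-corrected gyro rates, then nudges the result toward the accelerometer-derived angle with weight `accelerometer_bias` so gyro drift cannot accumulate. A self-contained sketch of that blend, with made-up numbers:
```python
# Illustrative complementary-filter blend (same shape as update() above).
def blend(gyro_angle, accel_angle, bias=0.05):
    # Mostly trust the integrated gyro angle; let the accel angle pull it
    # back slowly so drift is cancelled over time.
    return gyro_angle * (1 - bias) + accel_angle * bias

angle = 0.0
readings = [(0.9, 1.0), (1.1, 2.1), (1.0, 3.0)]  # (gyro_rate * dt, accel_angle)
for d_gyro, acc in readings:
    angle = blend(angle + d_gyro, acc)
    print(round(angle, 3))
```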
#### File: spotpuppy/rotation/rotation_sensor_base.py
```python
import numpy as np
class sensor:
def __init__(self, inverse_x=False, inverse_z=False):
self.inverse_x = inverse_x
self.inverse_z = inverse_z
self.flip_x_z = False
        self.rotation = np.array([0.0, 0.0])
def get_angle(self):
rot = np.copy(self.rotation)
if self.flip_x_z:
rot[0], rot[1] = rot[1], rot[0]
if self.inverse_x:
rot[0] = -rot[0]
if self.inverse_z:
rot[1] = -rot[1]
return rot
# Override update to add functionality
def update(self):
pass
    # Override calibrate to add the ability to calibrate
def calibrate(self):
pass
def get_json_params(self):
return {"inverse_x": self.inverse_x,
"inverse_z": self.inverse_z,
"flip_x_z": self.flip_x_z
}
def set_json_params(self, d):
self.inverse_x = d['inverse_x']
self.inverse_z = d['inverse_z']
self.flip_x_z = d['flip_x_z']
```
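A hedged sketch of how a new sensor plugs into this base class: override `update`/`calibrate` and write into `self.rotation`. The import path assumes the `spotpuppy.rotation` package layout from the file headers.
```python
# Hypothetical stub sensor that always reports fixed angles; useful for
# running the rest of the stack without hardware.
from spotpuppy.rotation import rotation_sensor_base

class fixed_sensor(rotation_sensor_base.sensor):
    def __init__(self, x=0.0, z=0.0, **kwargs):
        rotation_sensor_base.sensor.__init__(self, **kwargs)
        self.rotation[0] = x
        self.rotation[1] = z

    def update(self):
        pass  # nothing to poll

    def calibrate(self):
        pass  # nothing to zero

imu = fixed_sensor(x=2.0, z=-1.5, inverse_x=True)
print(imu.get_angle())  # x comes back negated by the base class
```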
#### File: spotpuppy/servo/servokit_servo_controller.py
```python
from adafruit_servokit import ServoKit
from . import servo_controller_base, servo_map
class controller(servo_controller_base.controller):
def __init__(self, servokit_channels=16):
servo_controller_base.controller.__init__(self)
self.mapping = servo_map.leg_servo_map()
self.aux_mapping = servo_map.aux_servo_map()
self.kit = ServoKit(channels=servokit_channels)
def set_servokit_servo(self, s, v):
if s == -1:
return
self.kit.servo[s].angle = clamp(v + 90, 0, 180)
def _set_leg_servo(self, leg, joint, value):
self.set_servokit_servo(self.mapping.get(leg, joint), value)
def _set_aux_servo(self, name, value):
self.set_servokit_servo(self.aux_mapping.get(name, -1), value)
def _get_json(self):
return {"legs": self.mapping.get_dict(), "aux": self.aux_mapping.get_dict()}
def _set_json(self, d):
self.mapping.set_dict(d["legs"])
self.aux_mapping.set_dict(d["aux"])
def clamp(x, mi, ma):
if x < mi: return mi
if x > ma: return ma
return x
```
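A small smoke-test sketch for the controller above; the channel number and angles are illustrative, and it assumes a PCA9685-style board that `adafruit_servokit` can reach.
```python
# Hedged hardware smoke test: drive one channel directly.
from spotpuppy.servo import servokit_servo_controller

c = servokit_servo_controller.controller(servokit_channels=16)
c.set_servokit_servo(0, 30)    # +30 deg is written as 120, clamped to 0..180
c.set_servokit_servo(-1, 45)   # -1 means "unmapped" and is silently ignored
```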
#### File: spotpuppy/utils/json_serialiser.py
```python
import json
import numpy as np
import os
def vec_2_to_json(vec, labels=["X", "Z"]):
return {labels[0]: float(vec[0]), labels[1]: float(vec[1])}
def vec_3_to_json(vec, labels=["X", "Y", "Z"]):
return {labels[0]: float(vec[0]), labels[1]: float(vec[1]), labels[2]: float(vec[2])}
def json_to_vec_2(js, labels=["X", "Z"]):
return np.array([float(js[labels[0]]), float(js[labels[1]])])
def json_to_vec_3(js, labels=["X", "Y", "Z"]):
return np.array([float(js[labels[0]]), float(js[labels[1]]), float(js[labels[2]])])
def save_json_dict(filename, data):
    if data is None:
        return
    s = json.dumps(data, indent=4)
with open(filename, 'w') as f:
f.write(s)
def load_json_dict(filename):
if not os.path.exists(filename):
print("Could not find " + filename + ", ignoring")
return
with open(filename, 'r') as f:
return json.loads(f.read())
def save_robot(quad, folder_name):
if not os.path.exists(folder_name):
os.makedirs(folder_name)
print("Folder did not exist, creating one...")
robot_config = quad.get_json_dict()
servo_map = quad.servo_controller.get_json()
leg_cal = {}
leg_names = ["FL", "FR", "BL", "BR"]
for l in range(4):
leg_cal[leg_names[l]] = quad.quad_controller.legs[l].to_json_dict()
accel_cal = quad.rotation_sensor.get_json_params()
save_json_dict(folder_name + "/robot_config.json", robot_config)
save_json_dict(folder_name + "/servo_map.json", servo_map)
save_json_dict(folder_name + "/leg_setup.json", leg_cal)
save_json_dict(folder_name + "/gyro.json", accel_cal)
def load_into_robot(quad, folder_name):
if not os.path.exists(folder_name):
print("Specified config directory does not exist")
return
robot_config = load_json_dict(folder_name + "/robot_config.json")
servo_map = load_json_dict(folder_name + "/servo_map.json")
leg_cal = load_json_dict(folder_name + "/leg_setup.json")
accel_cal = load_json_dict(folder_name + "/gyro.json")
quad.rotation_sensor.set_json_params(accel_cal)
quad.set_json_dict(robot_config)
quad.servo_controller.set_json(servo_map)
leg_names = ["FL", "FR", "BL", "BR"]
for l in range(4):
quad.quad_controller.legs[l].load_json_dict(leg_cal[leg_names[l]])
```
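A self-contained sketch of the vector and dict helpers above; the temporary path is illustrative.
```python
# Round-trip a vector through the JSON layout used by the config files.
import numpy as np

from spotpuppy.utils import json_serialiser as js

v = np.array([1.0, 2.0, 3.0])
d = js.vec_3_to_json(v)                  # {"X": 1.0, "Y": 2.0, "Z": 3.0}
js.save_json_dict("/tmp/vec.json", d)    # pretty-printed; skipped if d is None
restored = js.json_to_vec_3(js.load_json_dict("/tmp/vec.json"))
print(restored)                          # [1. 2. 3.]
```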
#### File: spotpuppy/utils/pid_control.py
```python
import time
class pid_controller:
def __init__(self, Kp, Ki, Kd):
self.Kp = Kp
self.Ki = Ki
self.Kd = Kd
self.last_error = 0
self.integral = 0
self.reset_time()
self.target = 0
def update(self, val):
t = time.time()
dt = t - self.last_time
self.last_time = t
error = self.target - val
d = (error - self.last_error) / dt
self.integral += error * dt
self.last_error = error
return (self.Kp * error) + (self.Ki * self.integral) + (self.Kd * d)
def reset_time(self):
self.last_time = time.time()
def set_target(self, target):
self.target = target
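# --- Hedged usage sketch (not part of the original file) ---
# Gains and the toy "plant" below are illustrative only; a real robot would
# feed a measured angle in and apply the correction to its joints.
if __name__ == "__main__":
    pid = pid_controller(Kp=0.8, Ki=0.1, Kd=0.005)
    pid.set_target(10.0)
    value = 0.0
    for _ in range(50):
        time.sleep(0.01)                # keep dt nonzero and sensible
        correction = pid.update(value)  # PID output for the current error
        value += correction * 0.1       # crude plant: nudge value toward target
    print(round(value, 2))              # should land near the 10.0 target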
``` |
{
"source": "JoshPaulie/AsyncOWM-py",
"score": 3
} |
#### File: AsyncOWM-py/AsyncOWM_py/helpers.py
```python
import aiohttp
from functools import total_ordering
from .custom_errors import CityNotFoundError, InvalidAPIKeyError, WrongLatitudeError
from .data_enums import Unit
async def make_request(request):
"""Checks passed URL. Returns json if successful, otherwise raises error"""
async with aiohttp.ClientSession() as session:
async with session.get(request) as response:
city_json = await response.json()
cod = int(city_json.get("cod"))
if cod == 404:
raise CityNotFoundError
elif cod == 401:
raise InvalidAPIKeyError
elif cod == 400:
raise WrongLatitudeError(
"This is a very vague and common error. Try another method of searching for your city."
)
elif cod == 200:
return city_json
@total_ordering
class Temp:
def __init__(self, temp: float, unit: Unit):
self.temp = temp
self.unit = unit
self.pretty = self.pretty_temp(self.temp, self.unit)
def pretty_temp(self, temp: float, unit: Unit) -> str:
"""Returns rounded temp with unit and ° symbol"""
symbol = unit.value[0]
temp = round(temp)
temp_str = f"{str(temp)}°{symbol}"
return temp_str
def __int__(self):
return int(self.temp)
def __str__(self):
return self.pretty
def __float__(self):
return self.temp
def __repr__(self):
return f"Temp({self.temp}, {self.unit}, {self.pretty})"
def __eq__(self, other):
return self.temp == other.temp
def __lt__(self, other):
return self.temp < other.temp
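# --- Hedged usage sketch (not part of the original module) ---
# The real Unit enum lives in .data_enums and its members are not shown here,
# so the stand-in enum below only mimics the value[0] lookup pretty_temp uses.
if __name__ == "__main__":
    from enum import Enum

    class _FakeUnit(Enum):
        IMPERIAL = ("F",)  # value[0] is the symbol appended after the degree sign

    hot = Temp(98.6, _FakeUnit.IMPERIAL)
    cold = Temp(32.0, _FakeUnit.IMPERIAL)
    print(hot)                      # 99°F (rounded by pretty_temp)
    print(hot > cold, hot == cold)  # True False, via @total_ordering
    print(int(cold), float(cold))   # 32 32.0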
``` |
{
"source": "JoshPaulie/boilerbot",
"score": 3
} |
#### File: boilerbot/modules/bot_init.py
```python
from pathlib import Path
from datetime import datetime
"""
Objects used to load cogs during initialization
"""
def collect_cogs():
"""Recursively checks all python modules in ./cogs/ directory. All files must be valid cogs.
Because it checks recursively, cogs can be further organized into directories"""
files = Path("cogs").rglob("*.py")
for file in files:
if "__init__" not in file.name:
yield file.as_posix()[:-3].replace("/", ".")
def load_cogs(bot):
"""Takes bot instance argument, "loads" collected cogs into it."""
for cog in collect_cogs():
try:
bot.load_extension(cog)
except Exception as e:
print(f"Failed to load cog {cog}\n{e}")
timestamp = datetime.now().strftime("%H:%M %Y/%m/%d")
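# --- Hedged usage sketch (not part of the original module) ---
# load_cogs() expects a discord.py-style bot whose load_extension() is
# synchronous (the pre-2.0 API); the prefix and token below are illustrative:
#
#     from discord.ext import commands
#     bot = commands.Bot(command_prefix="!")
#     load_cogs(bot)     # loads every module found under ./cogs/
#     bot.run("TOKEN")
if __name__ == "__main__":
    # Without a bot handy, just show which dotted module paths would be loaded.
    for cog_path in collect_cogs():
        print(cog_path)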
``` |
{
"source": "JoshPaulie/python-youtube",
"score": 3
} |
#### File: pyyoutube/utils/params_checker.py
```python
import logging
from typing import Optional, Union
from pyyoutube.error import ErrorCode, ErrorMessage, PyYouTubeException
from pyyoutube.utils.constants import RESOURCE_PARTS_MAPPING
logger = logging.getLogger(__name__)
def enf_comma_separated(
field: str,
value: Optional[Union[str, list, tuple, set]],
):
"""
Check to see if field's value type belong to correct type.
If it is, return api need value, otherwise, raise a PyYouTubeException.
Args:
field (str):
Name of the field you want to do check.
value (str, list, tuple, set, Optional)
Value for the field.
Returns:
Api needed string
"""
if value is None:
return None
try:
if isinstance(value, str):
return value
elif isinstance(value, (list, tuple, set)):
if isinstance(value, set):
logging.warning(f"Note: The order of the set is unreliable.")
return ",".join(value)
else:
raise PyYouTubeException(
ErrorMessage(
status_code=ErrorCode.INVALID_PARAMS,
message=f"Parameter ({field}) must be single str,comma-separated str,list,tuple or set",
)
)
except (TypeError, ValueError):
raise PyYouTubeException(
ErrorMessage(
status_code=ErrorCode.INVALID_PARAMS,
message=f"Parameter ({field}) must be single str,comma-separated str,list,tuple or set",
)
)
def enf_parts(resource: str, value: Optional[Union[str, list, tuple, set]], check=True):
"""
Check to see if value type belong to correct type, and if resource support the given part.
If it is, return api need value, otherwise, raise a PyYouTubeException.
Args:
resource (str):
Name of the resource you want to retrieve.
value (str, list, tuple, set, Optional):
Value for the part.
check (bool, optional):
Whether check the resource properties.
Returns:
Api needed part string
"""
if value is None:
parts = RESOURCE_PARTS_MAPPING[resource]
elif isinstance(value, str):
parts = set(value.split(","))
elif isinstance(value, (list, tuple, set)):
parts = set(value)
else:
raise PyYouTubeException(
ErrorMessage(
status_code=ErrorCode.INVALID_PARAMS,
message=f"Parameter (parts) must be single str,comma-separated str,list,tuple or set",
)
)
# check parts whether support.
if check:
support_parts = RESOURCE_PARTS_MAPPING[resource]
if not support_parts.issuperset(parts):
not_support_parts = ",".join(parts.difference(support_parts))
raise PyYouTubeException(
ErrorMessage(
status_code=ErrorCode.INVALID_PARAMS,
message=f"Parts {not_support_parts} for resource {resource} not support",
)
)
return ",".join(parts)
```
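A quick sketch of the two checkers above; it assumes the `RESOURCE_PARTS_MAPPING` table contains a `videos` entry whose supported parts include `id` and `snippet`.
```python
# Hedged usage sketch for the parameter checkers.
from pyyoutube import PyYouTubeException
from pyyoutube.utils.params_checker import enf_comma_separated, enf_parts

print(enf_comma_separated("video_id", ["a", "b", "c"]))  # -> "a,b,c"
print(enf_comma_separated("video_id", "a,b,c"))          # strings pass through unchanged
print(enf_parts("videos", "id,snippet"))                 # validated against the mapping
try:
    enf_parts("videos", ["id", "not_a_part"])            # unsupported part
except PyYouTubeException as e:
    print("rejected:", e)
```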
#### File: tests/apis/test_categories.py
```python
import json
import unittest
import responses
import pyyoutube
class ApiVideoCategoryTest(unittest.TestCase):
BASE_PATH = "testdata/apidata/categories/"
BASE_URL = "https://www.googleapis.com/youtube/v3/videoCategories"
with open(BASE_PATH + "video_category_single.json", "rb") as f:
VIDEO_CATEGORY_SINGLE = json.loads(f.read().decode("utf-8"))
with open(BASE_PATH + "video_category_multi.json", "rb") as f:
VIDEO_CATEGORY_MULTI = json.loads(f.read().decode("utf-8"))
with open(BASE_PATH + "video_category_by_region.json", "rb") as f:
VIDEO_CATEGORY_BY_REGION = json.loads(f.read().decode("utf-8"))
def setUp(self) -> None:
self.api = pyyoutube.Api(api_key="api key")
def testGetVideoCategories(self) -> None:
# test params
with self.assertRaises(pyyoutube.PyYouTubeException):
self.api.get_video_categories()
# test parts
with self.assertRaises(pyyoutube.PyYouTubeException):
self.api.get_video_categories(category_id="id", parts="id,not_part")
with responses.RequestsMock() as m:
m.add("GET", self.BASE_URL, json=self.VIDEO_CATEGORY_SINGLE)
m.add("GET", self.BASE_URL, json=self.VIDEO_CATEGORY_MULTI)
m.add("GET", self.BASE_URL, json=self.VIDEO_CATEGORY_BY_REGION)
res_by_single = self.api.get_video_categories(
category_id="17",
parts=["id", "snippet"],
return_json=True,
)
self.assertEqual(res_by_single["kind"], "youtube#videoCategoryListResponse")
self.assertEqual(len(res_by_single["items"]), 1)
self.assertEqual(res_by_single["items"][0]["id"], "17")
res_by_multi = self.api.get_video_categories(
category_id=["17", "18"],
parts="id,snippet",
)
self.assertEqual(len(res_by_multi.items), 2)
self.assertEqual(res_by_multi.items[1].id, "18")
res_by_region = self.api.get_video_categories(
region_code="US",
parts="id,snippet",
)
self.assertEqual(len(res_by_region.items), 32)
self.assertEqual(res_by_region.items[0].id, "1")
```
#### File: tests/apis/test_i18ns.py
```python
import json
import unittest
import responses
import pyyoutube
class ApiI18nTest(unittest.TestCase):
BASE_PATH = "testdata/apidata/i18ns/"
REGION_URL = "https://www.googleapis.com/youtube/v3/i18nRegions"
LANGUAGE_URL = "https://www.googleapis.com/youtube/v3/i18nLanguages"
with open(BASE_PATH + "regions_res.json", "rb") as f:
REGIONS_RES = json.loads(f.read().decode("utf-8"))
with open(BASE_PATH + "language_res.json", "rb") as f:
LANGUAGE_RES = json.loads(f.read().decode("utf-8"))
def setUp(self) -> None:
self.api = pyyoutube.Api(api_key="api key")
def testGetI18nRegions(self) -> None:
with responses.RequestsMock() as m:
m.add("GET", self.REGION_URL, json=self.REGIONS_RES)
regions = self.api.get_i18n_regions(parts=["id", "snippet"])
self.assertEqual(regions.kind, "youtube#i18nRegionListResponse")
self.assertEqual(len(regions.items), 4)
self.assertEqual(regions.items[0].id, "VE")
regions_json = self.api.get_i18n_regions(return_json=True)
self.assertEqual(len(regions_json["items"]), 4)
def testGetI18nLanguages(self) -> None:
with responses.RequestsMock() as m:
m.add("GET", self.LANGUAGE_URL, json=self.LANGUAGE_RES)
languages = self.api.get_i18n_languages(parts=["id", "snippet"])
self.assertEqual(len(languages.items), 5)
self.assertEqual(languages.items[0].id, "zh-CN")
languages_json = self.api.get_i18n_languages(return_json=True)
self.assertEqual(len(languages_json["items"]), 5)
```
#### File: tests/apis/test_video_abuse_reason.py
```python
import json
import unittest
import responses
import pyyoutube
class ApiVideoAbuseReason(unittest.TestCase):
BASE_PATH = "testdata/apidata/abuse_reasons/"
BASE_URL = "https://www.googleapis.com/youtube/v3/videoAbuseReportReasons"
with open(BASE_PATH + "abuse_reason.json", "rb") as f:
ABUSE_REASON_RES = json.loads(f.read().decode("utf-8"))
def setUp(self) -> None:
self.api_with_token = pyyoutube.Api(access_token="access token")
def testGetVideoAbuseReportReason(self) -> None:
with responses.RequestsMock() as m:
m.add("GET", self.BASE_URL, json=self.ABUSE_REASON_RES)
abuse_res = self.api_with_token.get_video_abuse_report_reason(
parts=["id", "snippet"],
)
self.assertEqual(
abuse_res.kind, "youtube#videoAbuseReportReasonListResponse"
)
self.assertEqual(len(abuse_res.items), 3)
abuse_res_json = self.api_with_token.get_video_abuse_report_reason(
return_json=True
)
self.assertEqual(len(abuse_res_json["items"]), 3)
```
#### File: tests/models/test_auth_models.py
```python
import json
import unittest
import pyyoutube.models as models
class AuthModelTest(unittest.TestCase):
BASE_PATH = "testdata/modeldata/users/"
with open(BASE_PATH + "access_token.json", "rb") as f:
ACCESS_TOKEN_INFO = json.loads(f.read().decode("utf-8"))
with open(BASE_PATH + "user_profile.json", "rb") as f:
USER_PROFILE_INFO = json.loads(f.read().decode("utf-8"))
def testAccessToken(self) -> None:
m = models.AccessToken.from_dict(self.ACCESS_TOKEN_INFO)
self.assertEqual(m.access_token, "access_token")
def testUserProfile(self) -> None:
m = models.UserProfile.from_dict(self.USER_PROFILE_INFO)
self.assertEqual(m.id, "12345678910")
origin_data = json.dumps(self.USER_PROFILE_INFO, sort_keys=True)
d = m.to_json(sort_keys=True, allow_nan=False)
self.assertEqual(origin_data, d)
```
#### File: tests/models/test_playlist.py
```python
import json
import unittest
import pyyoutube.models as models
class PlaylistModelTest(unittest.TestCase):
BASE_PATH = "testdata/modeldata/playlists/"
with open(BASE_PATH + "playlist_content_details.json", "rb") as f:
CONTENT_DETAILS_INFO = json.loads(f.read().decode("utf-8"))
with open(BASE_PATH + "playlist_snippet.json", "rb") as f:
SNIPPET_INFO = json.loads(f.read().decode("utf-8"))
with open(BASE_PATH + "playlist_status.json", "rb") as f:
STATUS_INFO = json.loads(f.read().decode("utf-8"))
with open(BASE_PATH + "playlist_info.json", "rb") as f:
PLAYLIST_INFO = json.loads(f.read().decode("utf-8"))
with open(BASE_PATH + "playlist_api_response.json", "rb") as f:
PLAYLIST_RESPONSE_INFO = json.loads(f.read().decode("utf-8"))
def testPlayListContentDetails(self) -> None:
m = models.PlaylistContentDetails.from_dict(self.CONTENT_DETAILS_INFO)
self.assertEqual(m.itemCount, 4)
def testPlayListSnippet(self) -> None:
m = models.PlaylistSnippet.from_dict(self.SNIPPET_INFO)
self.assertEqual(
m.string_to_datetime(m.publishedAt).isoformat(), "2019-05-16T18:46:20+00:00"
)
self.assertEqual(m.title, "Assistant on Air")
self.assertEqual(
m.thumbnails.default.url, "https://i.ytimg.com/vi/D-lhorsDlUQ/default.jpg"
)
self.assertEqual(m.localized.title, "Assistant on Air")
def testPlayListStatus(self) -> None:
m = models.PlaylistStatus.from_dict(self.STATUS_INFO)
self.assertEqual(m.privacyStatus, "public")
def testPlayList(self) -> None:
m = models.Playlist.from_dict(self.PLAYLIST_INFO)
self.assertEqual(m.id, "PLOU2XLYxmsIJpufeMHncnQvFOe0K3MhVp")
self.assertEqual(m.player, None)
self.assertEqual(m.snippet.title, "Assistant on Air")
def testPlaylistListResponse(self) -> None:
m = models.PlaylistListResponse.from_dict(self.PLAYLIST_RESPONSE_INFO)
self.assertEqual(m.kind, "youtube#playlistListResponse")
self.assertEqual(m.pageInfo.totalResults, 416)
self.assertEqual(m.items[0].id, "PLOU2XLYxmsIJpufeMHncnQvFOe0K3MhVp")
```
#### File: tests/models/test_videos.py
```python
import json
import unittest
import pyyoutube
import pyyoutube.models as models
class VideoModelTest(unittest.TestCase):
BASE_PATH = "testdata/modeldata/videos/"
with open(BASE_PATH + "video_content_details.json", "rb") as f:
CONTENT_DETAILS_INFO = json.loads(f.read().decode("utf-8"))
with open(BASE_PATH + "video_topic_details.json", "rb") as f:
TOPIC_DETAILS_INFO = json.loads(f.read().decode("utf-8"))
with open(BASE_PATH + "video_snippet.json", "rb") as f:
SNIPPET_INFO = json.loads(f.read().decode("utf-8"))
with open(BASE_PATH + "video_statistics.json", "rb") as f:
STATISTICS_INFO = json.loads(f.read().decode("utf-8"))
with open(BASE_PATH + "video_status.json", "rb") as f:
STATUS_INFO = json.loads(f.read().decode("utf-8"))
with open(BASE_PATH + "video_info.json", "rb") as f:
VIDEO_INFO = json.loads(f.read().decode("utf-8"))
with open(BASE_PATH + "video_api_response.json", "rb") as f:
VIDEO_API_RESPONSE = json.loads(f.read().decode("utf-8"))
def testVideoContentDetails(self) -> None:
m = models.VideoContentDetails.from_dict(self.CONTENT_DETAILS_INFO)
self.assertEqual(m.duration, "PT21M7S")
seconds = m.get_video_seconds_duration()
self.assertEqual(seconds, 1267)
m.duration = None
self.assertEqual(m.get_video_seconds_duration(), None)
with self.assertRaises(pyyoutube.PyYouTubeException):
m.duration = "error datetime"
m.get_video_seconds_duration()
def testVideoTopicDetails(self) -> None:
m = models.VideoTopicDetails.from_dict(self.TOPIC_DETAILS_INFO)
self.assertEqual(m.topicIds[0], "/m/02jjt")
self.assertEqual(len(m.topicCategories), 1)
full_topics = m.get_full_topics()
self.assertEqual(full_topics[0].id, "/m/02jjt")
self.assertEqual(full_topics[0].description, "Entertainment (parent topic)")
def testVideoSnippet(self) -> None:
m = models.VideoSnippet.from_dict(self.SNIPPET_INFO)
self.assertEqual(
m.string_to_datetime(m.publishedAt).isoformat(), "2019-03-21T20:37:49+00:00"
)
m.publishedAt = None
self.assertEqual(m.string_to_datetime(m.publishedAt), None)
with self.assertRaises(pyyoutube.PyYouTubeException):
m.string_to_datetime("error datetime string")
self.assertEqual(m.channelId, "UC_x5XG1OV2P6uZZ5FSM9Ttw")
self.assertEqual(
m.thumbnails.default.url, "https://i.ytimg.com/vi/D-lhorsDlUQ/default.jpg"
)
self.assertEqual(m.tags[0], "Google")
self.assertEqual(
m.localized.title, "What are Actions on Google (Assistant on Air)"
)
def testVideoStatistics(self) -> None:
m = models.VideoStatistics.from_dict(self.STATISTICS_INFO)
self.assertEqual(m.viewCount, "8087")
def testVideoStatus(self) -> None:
m = models.VideoStatus.from_dict(self.STATUS_INFO)
self.assertEqual(m.uploadStatus, "processed")
self.assertEqual(
m.string_to_datetime(m.publishAt).isoformat(), "2019-03-21T20:37:49+00:00"
)
def testVideo(self) -> None:
m = models.Video.from_dict(self.VIDEO_INFO)
self.assertEqual(m.id, "D-lhorsDlUQ")
self.assertEqual(
m.snippet.title, "What are Actions on Google (Assistant on Air)"
)
def testVideoListResponse(self) -> None:
m = models.VideoListResponse.from_dict(self.VIDEO_API_RESPONSE)
self.assertEqual(m.kind, "youtube#videoListResponse")
self.assertEqual(m.pageInfo.totalResults, 1)
self.assertEqual(m.items[0].id, "D-lhorsDlUQ")
``` |