repo_name: string (lengths 6-61) | path: string (lengths 4-230) | copies: string (lengths 1-3) | size: string (lengths 4-6) | text: string (lengths 1.01k-850k) | license: string (15 classes) | hash: int64 (-9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean: float64 (11.6-96.6) | line_max: int64 (32-939) | alpha_frac: float64 (0.26-0.9) | autogenerated: bool (1 class) | ratio: float64 (1.62-6.1) | config_test: bool (2 classes) | has_no_keywords: bool (2 classes) | few_assignments: bool (1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
magicjohnson/targetprocess-client | tests/tests_serializers.py | 1 | 2681 |
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
from unittest import TestCase
from pytz import UTC
from targetprocess.serializers import TargetProcessSerializer
class TargetProcessSerializerTest(TestCase):
def setUp(self):
self.maxDiff = None
def test_deserialize_dict(self):
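# TargetProcess encodes datetimes as '/Date(<milliseconds since epoch><offset>)/'
# strings; the serializer is expected to turn them into timezone-aware UTC datetimes.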
data = {
'EndDate': '/Date(1441596445000-0500)/',
'Effort': 0.0,
'ResourceType': 'UserStory',
'Team': {
'Id': 298,
'Name': 'DevOps',
},
'LastCommentDate': None,
'CustomFields': [
{
'Name': 'UI Spec',
'Type': 'RichText',
'Value': None
},
{
'Name': 'Date',
'Type': 'DropDown',
'Value': '/Date(1441596445000-0500)/'
},
]
}
expected = {
'EndDate': datetime(2015, 9, 7, 3, 27, 25, tzinfo=UTC),
'Effort': 0.0,
'ResourceType': 'UserStory',
'Team': {
'Id': 298,
'Name': 'DevOps',
},
'LastCommentDate': None,
'CustomFields': [
{
'Name': 'UI Spec',
'Type': 'RichText',
'Value': None
},
{
'Name': 'Date',
'Type': 'DropDown',
'Value': datetime(2015, 9, 7, 3, 27, 25, tzinfo=UTC)
},
]
}
result = TargetProcessSerializer().deserialize(data)
self.assertEqual(result, expected)
def test_deserialize_dict_with_items(self):
data = {
'Items': [
{
'Date': '/Date(1441596445000-0500)/',
'NoneField': None,
'TextField': 'Text',
'NestedDict': {'Field': 'Value'},
'NestedList': [{'Field': 'Value'}]
}
]
}
expected = [
{
'Date': datetime(2015, 9, 7, 3, 27, 25, tzinfo=UTC),
'NoneField': None,
'TextField': 'Text',
'NestedDict': {'Field': 'Value'},
'NestedList': [{'Field': 'Value'}]
}
]
result = TargetProcessSerializer().deserialize(data)
self.assertEqual(result, expected)
| mit | 7,321,765,493,372,442,000 | 28.461538 | 72 | 0.419247 | false | 4.590753 | true | false | false |
mzweilin/EvadeML-Zoo | attacks/adaptive/adaptive_adversary.py | 1 | 13504 |
"""
Demo of whether an adaptive adversary works against feature squeezing.
Embed the differentiable filter layers in a model.
Pass in the (average) gradient (part of the loss) to an attack algorithm.
Implement the Gaussian-noise iterative method for non-differentiable filter layers (bit depth reduction).
Introduce randomized feature squeezing (needs to be verified with legitimate examples; it should not harm accuracy).
"""
import os
import tensorflow as tf
import numpy as np
import math
# Core: Get the gradient of models for the attack algorithms.
# We will combine the gradient of several models.
from keras.models import Model
from keras.layers import Lambda, Input
def insert_pre_processing_layer_to_model(model, input_shape, func):
# Output model: accept [-0.5, 0.5] input range instead of [0,1], output logits instead of softmax.
# The output model will have three layers in abstract: Input, Lambda, TrainingModel.
model_logits = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)
input_tensor = Input(shape=input_shape)
scaler_layer = Lambda(func, input_shape=input_shape)(input_tensor)
output_tensor = model_logits(scaler_layer)
model_new = Model(inputs=input_tensor, outputs=output_tensor)
return model_new
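# Illustrative usage (the input shape and squeezer below are examples only):
#   model_squeezed = insert_pre_processing_layer_to_model(
#       model, input_shape=(28, 28, 1), func=lambda x: reduce_precision_tf(x, 2))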
# maybe_generate_adv_examples(sess, model, x, y, X_test, Y_test_target, attack_name, attack_params, use_cache = x_adv_fpath, verbose=FLAGS.verbose, attack_log_fpath=attack_log_fpath)
def adaptive_attack(sess, model, squeezers, x, y, X_test, Y_test_target, attack_name, attack_params):
for squeeze_func in squeezers:
predictions = model(squeeze_func(x))
# tf.contrib.distributions.kl(dist_a, dist_b, allow_nan=False, name=None)
# from .median import median_filter as median_filter_tf
# from .median import median_random_filter as median_random_filter_tf
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from utils.squeeze import get_squeezer_by_name, reduce_precision_tf
# if FLAGS.dataset_name == "MNIST":
# # squeezers_name = ['median_smoothing_2', 'median_smoothing_3', 'binary_filter']
# squeezers_name = ['median_smoothing_2', 'binary_filter']
# elif FLAGS.dataset_name == "CIFAR-10":
# squeezers_name = ["bit_depth_5", "bit_depth_4", 'median_smoothing_1_2', 'median_smoothing_2_1','median_smoothing_2', 'median_smoothing_1_3']
# elif FLAGS.dataset_name == "ImageNet":
# squeezers_name = ["bit_depth_5", 'median_smoothing_1_2', 'median_smoothing_2_1','median_smoothing_2']
def get_tf_squeezer_by_name(name):
return get_squeezer_by_name(name, 'tensorflow')
tf_squeezers_name_mnist = ['median_filter_2_2', 'bit_depth_1']
tf_squeezers_name_cifar10 = ['median_filter_1_2', 'median_filter_2_1', 'median_filter_2_2', 'median_filter_1_3', 'bit_depth_5', 'bit_depth_4']
tf_squeezers_name_imagenet = ['median_filter_1_2', 'median_filter_2_1', 'median_filter_2_2', 'median_filter_1_3', 'bit_depth_5']
# tf_squeezers = map(get_tf_squeezer_by_name, tf_squeezers_name)
def get_tf_squeezers_by_str(tf_squeezers_str):
tf_squeezers_name = tf_squeezers_str.split(',')
return map(get_tf_squeezer_by_name, tf_squeezers_name)
def kl_tf(x1, x2, eps = 0.000000001):
x1 = tf.clip_by_value(x1, eps, 1)
x2 = tf.clip_by_value(x2, eps, 1)
return tf.reduce_sum(x1 * tf.log(x1/x2), reduction_indices=[1])
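# kl_tf returns one KL(x1 || x2) value per row (i.e. per batch element);
# clipping both inputs to [eps, 1] avoids log(0) and division by zero.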
def generate_adaptive_carlini_l2_examples(sess, model, x, y, X, Y_target, attack_params, verbose, attack_log_fpath):
# (model, x, y, X, Y_target, tf_squeezers=tf_squeezers, detector_threshold = 0.2):
# tf_squeezers=tf_squeezers
eval_dir = os.path.dirname(attack_log_fpath)
default_params = {
'batch_size': 100,
'confidence': 0,
'targeted': False,
'learning_rate': 9e-2,
'binary_search_steps': 9,
'max_iterations': 5000,
'abort_early': False, # TODO: not supported.
'initial_const': 0.0,
'detector_threshold': 0.3,
'uint8_optimized': False,
'tf_squeezers': [],
'distance_measure': 'l1',
'between_squeezers': False,
}
if 'tf_squeezers' in attack_params:
tf_squeezers_str = attack_params['tf_squeezers']
tf_squeezers = get_tf_squeezers_by_str(tf_squeezers_str)
attack_params['tf_squeezers'] = tf_squeezers
accepted_params = default_params.keys()
for k in attack_params:
if k not in accepted_params:
raise NotImplementedError("Unsuporrted params in Carlini L2: %s" % k)
else:
default_params[k] = attack_params[k]
# assert batch_size <= len(X)
if 'batch_size' in default_params and default_params['batch_size'] > len(X):
default_params['batch_size'] = len(X)
return adaptive_CarliniL2(sess, model, X, Y_target, eval_dir, **default_params)
def adaptive_CarliniL2(sess, model, X, Y_target, eval_dir, batch_size, confidence, targeted, learning_rate, binary_search_steps, max_iterations, abort_early, initial_const, detector_threshold, uint8_optimized, tf_squeezers, distance_measure, between_squeezers):
model_logits = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)
# Need a determined batch size for coefficient vectors.
x = tf.placeholder(shape=X.shape, dtype=tf.float32)
y = tf.placeholder(shape=Y_target.shape, dtype=tf.float32)
# Adapted from Warren and Carlini's code
N0, H0, W0, C0 = X.shape
# Range [0, 1], initialize as the original images.
batch_images = X
# Get the arctanh of the original images.
batch_images_tanh = np.arctanh((batch_images - 0.5) / 0.501)
batch_labels = Y_target
x_star_tanh = tf.Variable(batch_images_tanh, dtype=tf.float32)
# Range [0, 1], initialize as the original images.
x_star = tf.tanh(x_star_tanh) / 2. + 0.5
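# The tanh change of variables keeps x_star inside (0, 1) automatically, so the
# optimizer can work on the unconstrained variable x_star_tanh (the
# box-constraint trick from Carlini & Wagner's L2 attack).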
# The result is optimized for uint8.
x_star_uint8 = reduce_precision_tf(x_star, 256)
# Gradient required.
y_pred_logits = model_logits(x_star)
y_pred = model(x_star)
print ("tf_squezers: %s" % tf_squeezers)
y_squeezed_pred_list = [ model(func(x_star)) for func in tf_squeezers ]
coeff = tf.placeholder(shape=(N0,), dtype=tf.float32)
l2dist = tf.reduce_sum(tf.square(x_star - x), [1, 2, 3])
ground_truth_logits = tf.reduce_sum(y * y_pred_logits, 1)
top_other_logits = tf.reduce_max((1 - y) * y_pred_logits - (y * 10000), 1)
# Untargeted attack, minimize the ground_truth_logits.
# target_penalty = tf.maximum(0., ground_truth_logits - top_other_logits)
if targeted is False:
# if untargeted, optimize for making this class least likely.
target_penalty = tf.maximum(0.0, ground_truth_logits-top_other_logits+confidence)
else:
# if targeted, optimize for making the other class most likely
target_penalty = tf.maximum(0.0, top_other_logits-ground_truth_logits+confidence)
# Minimize the sum of L1 score.
detector_penalty = None
# TODO: include between squeezers l1.
all_pred_list = [y_pred] + y_squeezed_pred_list
if between_squeezers:
print ("#Between squeezers")
for i, pred_base in enumerate(all_pred_list):
for j in range(i+1, len(all_pred_list)):
pred_target = all_pred_list[j]
if distance_measure == "l1":
score = tf.reduce_sum(tf.abs(pred_base - pred_target), 1)
elif distance_measure == 'kl_f':
score = kl_tf(pred_base, pred_target)
elif distance_measure == 'kl_b':
score = kl_tf(pred_target, pred_base)
detector_penalty_sub = tf.maximum(0., score - detector_threshold)
if detector_penalty is None:
detector_penalty = detector_penalty_sub
else:
detector_penalty += detector_penalty_sub
else:
for y_squeezed_pred in y_squeezed_pred_list:
if distance_measure == "l1":
score = tf.reduce_sum(tf.abs(y_pred - y_squeezed_pred), 1)
elif distance_measure == 'kl_f':
score = kl_tf(y_pred, y_squeezed_pred)
elif distance_measure == 'kl_b':
score = kl_tf(y_squeezed_pred, y_pred)
detector_penalty_sub = tf.maximum(0., score - detector_threshold)
if detector_penalty is None:
detector_penalty = detector_penalty_sub
else:
detector_penalty += detector_penalty_sub
# There could be different design choices here, e.g. adding a separate coefficient for the detector penalty.
loss = tf.add((target_penalty + detector_penalty) * coeff, l2dist)
# Minimize loss by updating variables in var_list.
train_adv_step = tf.train.AdamOptimizer(learning_rate).minimize(loss, var_list=[x_star_tanh])
# The last four global variables are the Adam optimizer's internal state (its beta-power accumulators and per-variable slot variables):
# <tf.Variable 'beta1_power:0' shape=() dtype=float32_ref>
# <tf.Variable 'beta2_power:0' shape=() dtype=float32_ref>
# <tf.Variable 'Variable/Adam:0' shape=(10, 28, 28, 1) dtype=float32_ref>
# <tf.Variable 'Variable/Adam_1:0' shape=(10, 28, 28, 1) dtype=float32_ref>
optimizer_variables = tf.global_variables()[-4:]
# The result is optimized for uint8. Added by Weilin.
if uint8_optimized:
predictions = tf.argmax(model_logits(x_star_uint8), 1)
else:
predictions = tf.argmax(model_logits(x_star), 1)
if targeted is False:
correct_prediction = tf.equal(predictions, tf.argmax(y, 1))
else:
correct_prediction = tf.not_equal(predictions, tf.argmax(y, 1))
# Initialize loss coefficients
coeff_block_log = np.tile([[initial_const], [float('nan')], [float('nan')]], (1, N0))
coeff_curr_log = coeff_block_log[0]
coeff_high_log = coeff_block_log[1]
coeff_low_log = coeff_block_log[2]
# Collect best adversarial images
best_l2 = np.zeros((N0,)) + float('nan')
best_coeff_log = np.zeros((N0,)) + float('nan')
best_iter = np.zeros((N0,)) + float('nan')
best_images = np.copy(batch_images)
# Note: I didn't find any initialization with random perturbations here.
for _ in range(binary_search_steps):
# Reset x_star_tanh and optimizer
sess.run(tf.variables_initializer([x_star_tanh] + optimizer_variables))
tf.assert_variables_initialized()
print (coeff_curr_log) # %%%
curr_coeff = np.exp(coeff_curr_log)
# Initially, all are failed adversarial examples.
all_fail = np.ones((N0,), dtype=np.bool)
# Training loop
improve_count = 0
# 5000 iterations by default.
for j in range(max_iterations):
# A correct prediction means the untargeted attack has failed.
xst, adv_fail, l1o, l2d, _ = sess.run([x_star, correct_prediction, detector_penalty, l2dist, train_adv_step], feed_dict={
x: batch_images,
y: batch_labels,
coeff: curr_coeff,
})
all_fail = np.logical_and(all_fail, adv_fail)
for i in range(N0):
if adv_fail[i] or l1o[i] > 0:
continue
# Save the best successful adversarial examples, i.e. those with the lowest L2.
if math.isnan(best_l2[i]) or l2d[i] < best_l2[i]:
best_l2[i] = l2d[i]
best_coeff_log[i] = coeff_curr_log[i]
best_iter[i] = j
best_images[i] = xst[i]
improve_count += 1
if j % 100 == 0:
print("Adv. training iter. {}/{} improved {}".format(j, max_iterations, improve_count))
improve_count = 0
xst, adv_fail, l1o, l2d = sess.run([x_star, correct_prediction, detector_penalty, l2dist], feed_dict={
x: batch_images,
y: batch_labels,
})
# Run it once more, because the last iteration of the for loop doesn't get evaluated.
for i in range(N0):
if adv_fail[i] or l1o[i] > 0:
continue
if math.isnan(best_l2[i]) or l2d[i] < best_l2[i]:
best_l2[i] = l2d[i]
best_coeff_log[i] = coeff_curr_log[i]
best_iter[i] = max_iterations
best_images[i] = xst[i]
improve_count += 1
print("Finished training {}/{} improved {}".format(max_iterations, max_iterations, improve_count))
# Save generated examples and their coefficients
np.save(eval_dir + '/combined_adv_imgs.npy', best_images)
np.save(eval_dir + '/combined_adv_coeff_log.npy', best_coeff_log)
# Update coeff
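# The coefficient search runs in log space: +2.3 (~ln 10) multiplies the
# coefficient by ~10x while no upper bound is known, -0.69 (~ln 2) halves it,
# and otherwise the midpoint of the log bounds is taken (a binary search).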
for i, (fail, curr, high, low) in enumerate(zip(adv_fail, coeff_curr_log, coeff_high_log, coeff_low_log)):
if fail:
# increase to allow more distortion
coeff_low_log[i] = low = curr
if math.isnan(high):
coeff_curr_log[i] = curr + 2.3
else:
coeff_curr_log[i] = (high + low) / 2
else:
# decrease to penalize distortion
coeff_high_log[i] = high = curr
if math.isnan(low):
coeff_curr_log[i] = curr - 0.69
else:
coeff_curr_log[i] = (high + low) / 2
np.save(eval_dir + '/combined_coeff_log.npy', coeff_block_log)
return best_images
| mit | -4,588,822,924,115,411,500 | 41.332288 | 261 | 0.61878 | false | 3.259474 | false | false | false |
google-research/google-research | pse/jumping_task/training_helpers.py | 1 | 8970 |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Helpers for training an agent using imitation learning."""
from absl import logging
import numpy as np
import tensorflow.compat.v2 as tf
EPS = 1e-9
LARGE_NUM = 1e9
def metric_fixed_point(action_cost_matrix, gamma=1.0):
"""Computes the pseudo-metric satisfying F using fixed point iteration."""
n, m = action_cost_matrix.shape
d_metric = np.zeros_like(action_cost_matrix)
def fixed_point_operator(d_metric):
d_metric_new = np.empty_like(d_metric)
for i in range(n):
for j in range(m):
d_metric_new[i, j] = action_cost_matrix[i, j] + \
gamma * d_metric[min(i + 1, n - 1), min(j + 1, m - 1)]
return d_metric_new
while True:
d_metric_new = fixed_point_operator(d_metric)
if np.sum(np.abs(d_metric - d_metric_new)) < EPS:
break
else:
d_metric = d_metric_new
return d_metric
@tf.function
def tf_metric_fixed_point(action_cost_matrix, gamma):
return tf.numpy_function(
metric_fixed_point, [action_cost_matrix, gamma], Tout=tf.float32)
def calculate_action_cost_matrix(actions_1, actions_2):
action_equality = tf.math.equal(
tf.expand_dims(actions_1, axis=1), tf.expand_dims(actions_2, axis=0))
return 1.0 - tf.cast(action_equality, dtype=tf.float32)
def calculate_reward_cost_matrix(rewards_1, rewards_2):
diff = tf.expand_dims(rewards_1, axis=1) - tf.expand_dims(rewards_2, axis=0)
return tf.cast(tf.abs(diff), dtype=tf.float32)
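# Illustrative sketch of how the helpers above combine (a1/a2 are 1-D action
# tensors from two paired trajectories; the discount value is an example):
#   cost_matrix = calculate_action_cost_matrix(a1, a2)   # 0/1 mismatch costs
#   d_metric = tf_metric_fixed_point(cost_matrix, 0.99)  # recursive metric values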
def ground_truth_coupling(actions_1, actions_2):
"""Calculates ground truth coupling using optimal actions on two envs."""
diff = actions_2.index(1) - actions_1.index(1)
assert diff >= 0, 'Please pass the actions_2 as actions_1 and vice versa!'
n, m = len(actions_1), len(actions_2)
cost_matrix = np.ones((n, m), dtype=np.float32)
for i in range(n):
j = i + diff
if j < m:
cost_matrix[i, j] = 0.0
else:
break
return cost_matrix
@tf.function
def cosine_similarity(x, y):
"""Computes cosine similarity between all pairs of vectors in x and y."""
x_expanded, y_expanded = x[:, tf.newaxis], y[tf.newaxis, :]
similarity_matrix = tf.reduce_sum(x_expanded * y_expanded, axis=-1)
similarity_matrix /= (
tf.norm(x_expanded, axis=-1) * tf.norm(y_expanded, axis=-1) + EPS)
return similarity_matrix
@tf.function
def l2_distance(x, y):
"""Computes cosine similarity between all pairs of vectors in x and y."""
x_expanded, y_expanded = x[:, tf.newaxis], y[tf.newaxis, :]
return tf.sqrt(tf.reduce_sum((x_expanded - y_expanded)**2, axis=-1))
@tf.function
def contrastive_loss(similarity_matrix,
metric_values,
temperature,
coupling_temperature=1.0,
use_coupling_weights=True):
"""Contrative Loss with soft coupling."""
logging.info('Using alternative contrastive loss.')
metric_shape = tf.shape(metric_values)
similarity_matrix /= temperature
neg_logits1, neg_logits2 = similarity_matrix, similarity_matrix
col_indices = tf.cast(tf.argmin(metric_values, axis=1), dtype=tf.int32)
pos_indices1 = tf.stack(
(tf.range(metric_shape[0], dtype=tf.int32), col_indices), axis=1)
pos_logits1 = tf.gather_nd(similarity_matrix, pos_indices1)
row_indices = tf.cast(tf.argmin(metric_values, axis=0), dtype=tf.int32)
pos_indices2 = tf.stack(
(row_indices, tf.range(metric_shape[1], dtype=tf.int32)), axis=1)
pos_logits2 = tf.gather_nd(similarity_matrix, pos_indices2)
if use_coupling_weights:
metric_values /= coupling_temperature
coupling = tf.exp(-metric_values)
pos_weights1 = -tf.gather_nd(metric_values, pos_indices1)
pos_weights2 = -tf.gather_nd(metric_values, pos_indices2)
pos_logits1 += pos_weights1
pos_logits2 += pos_weights2
negative_weights = tf.math.log((1.0 - coupling) + EPS)
neg_logits1 += tf.tensor_scatter_nd_update(
negative_weights, pos_indices1, pos_weights1)
neg_logits2 += tf.tensor_scatter_nd_update(
negative_weights, pos_indices2, pos_weights2)
neg_logits1 = tf.math.reduce_logsumexp(neg_logits1, axis=1)
neg_logits2 = tf.math.reduce_logsumexp(neg_logits2, axis=0)
loss1 = tf.reduce_mean(neg_logits1 - pos_logits1)
loss2 = tf.reduce_mean(neg_logits2 - pos_logits2)
return loss1 + loss2
def representation_alignment_loss(nn_model,
optimal_data_tuple,
use_bisim=False,
gamma=0.99,
use_l2_loss=False,
use_coupling_weights=False,
coupling_temperature=1.0,
temperature=1.0,
ground_truth=False):
"""Representation alignment loss."""
obs_1, actions_1, rewards_1 = optimal_data_tuple[0]
obs_2, actions_2, rewards_2 = optimal_data_tuple[1]
representation_1 = nn_model.representation(obs_1)
representation_2 = nn_model.representation(obs_2)
if use_l2_loss:
similarity_matrix = l2_distance(representation_1, representation_2)
else:
similarity_matrix = cosine_similarity(representation_1, representation_2)
if ground_truth:
metric_vals = tf.convert_to_tensor(
ground_truth_coupling(actions_1, actions_2), dtype=tf.float32)
else:
if use_bisim:
cost_matrix = calculate_reward_cost_matrix(rewards_1, rewards_2)
else:
cost_matrix = calculate_action_cost_matrix(actions_1, actions_2)
metric_vals = tf_metric_fixed_point(cost_matrix, gamma)
if use_l2_loss:
# Directly match the l2 distance between representations to metric values
alignment_loss = tf.reduce_mean((similarity_matrix - metric_vals)**2)
else:
alignment_loss = contrastive_loss(
similarity_matrix,
metric_vals,
temperature,
coupling_temperature=coupling_temperature,
use_coupling_weights=use_coupling_weights)
return alignment_loss, metric_vals, similarity_matrix
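# Illustrative call (names and shapes are assumptions): `optimal_data_tuple`
# pairs (obs, actions, rewards) from two environments, and the returned
# alignment loss can be combined with the imitation loss during training:
#   data_tuple = ((obs_1, acts_1, rews_1), (obs_2, acts_2, rews_2))
#   loss, _, _ = representation_alignment_loss(nn_model, data_tuple, gamma=0.99)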
@tf.function
def cross_entropy(logits, targets):
labels = tf.stack([1 - targets, targets], axis=1)
loss_vals = tf.nn.softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
return tf.reduce_mean(loss_vals)
def cross_entropy_loss(model, inputs, targets, training=False):
predictions = model(inputs, training=training)
return cross_entropy(predictions, targets)
@tf.function
def weight_decay(model):
l2_losses = [tf.nn.l2_loss(x) for x in model.trainable_variables]
return tf.add_n(l2_losses) / len(l2_losses)
def create_balanced_dataset(x_train, y_train, batch_size):
"""Creates a balanced training dataset by upsampling the rare class."""
def partition_dataset(x_train, y_train):
neg_mask = (y_train == 0)
x_train_neg = x_train[neg_mask]
y_train_neg = np.zeros(len(x_train_neg), dtype=np.float32)
x_train_pos = x_train[~neg_mask]
y_train_pos = np.ones(len(x_train_pos), dtype=np.float32)
return (x_train_pos, y_train_pos), (x_train_neg, y_train_neg)
pos, neg = partition_dataset(x_train, y_train)
pos_dataset = tf.data.Dataset.from_tensor_slices(pos).apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=len(pos[0])))
neg_dataset = tf.data.Dataset.from_tensor_slices(neg).apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=len(neg[0])))
dataset = tf.data.experimental.sample_from_datasets(
[pos_dataset, neg_dataset])
ds_tensors = dataset.batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
return ds_tensors
def create_dataset(x_train, y_train, batch_size):
"""Creates a training dataset."""
dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=len(x_train[0])))
ds_tensors = dataset.batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
return ds_tensors
def create_iterators(datasets, batch_size):
"""Create tf.Dataset iterators from a list of numpy datasets."""
tf_datasets = [tf.data.Dataset.from_tensor_slices(data).batch(batch_size)
for data in datasets]
input_iterator = tf.data.Iterator.from_structure(
tf_datasets[0].output_types, tf_datasets[0].output_shapes)
init_ops = [input_iterator.make_initializer(data) for data in tf_datasets]
x_batch = input_iterator.get_next()
return x_batch, init_ops
| apache-2.0 | 3,118,179,143,913,050,600 | 36.219917 | 80 | 0.675362 | false | 3.246471 | false | false | false |
tomas-mazak/taipan | taipan/dictionary.py | 1 | 11826 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import re
import itertools
import gtk
import tegakigtk.recognizer
import cjklib.dictionary
import cjklib.dictionary.search
import cjklib.reading
import cjklib.characterlookup
import tagtable
import sorder
MODULE_DIR = os.path.dirname(os.path.abspath( __file__ ))
CJKLIB_OPTS = {'databaseUrl': 'sqlite:///' +
os.path.join(MODULE_DIR, 'cjklib.db')}
GLADE_FILE = os.path.join(MODULE_DIR, "taipan.glade")
class DictionaryWidget(gtk.Frame):
"""Custom widget encapsulating dictionary functions including handwriting
recognition"""
def __init__(self):
"""Init the widget components and required modules from cjklib"""
gtk.Frame.__init__(self)
# Init cjklib
cjklib.dictionary.search.setDefaultWildcards(singleCharacter='?',
multipleCharacters='*')
self.cjk = cjklib.characterlookup.CharacterLookup('T', **CJKLIB_OPTS)
self.dict = cjklib.dictionary.CEDICT(**CJKLIB_OPTS)
self.reading = cjklib.reading.ReadingFactory(**CJKLIB_OPTS)
# Fire up GtkBuilder
builder = gtk.Builder()
builder.add_from_file(GLADE_FILE)
# Get dictionary layout from GtkBuilder and add it to this widget
gladewin = builder.get_object("DictionaryWidget")
layout = builder.get_object("DictionaryWidgetLayout")
gladewin.remove(layout)
self.add(layout)
# Get search box and connect events
self.entry = builder.get_object("ent_search")
self.entry.connect("key_press_event", self._on_entry_keypress)
self.entry.connect("changed", self._on_entry_changed)
# Set up popup completion for the search box
compl = gtk.EntryCompletion()
compl.set_popup_set_width(False)
self.entry.set_completion(compl)
# The ListStore will contain a nice string for displaying in the popup and
# the simplified characters to put into the search box
self.compl_model = gtk.ListStore(str, str)
compl.set_model(self.compl_model)
compl.set_text_column(0)
# The match function just accepts all items from the list, as we do the
# filtering elsewhere
compl.set_match_func(lambda c,k,r: True)
compl.connect("match_selected", self._on_compl_match_selected)
# Get search button and connect events
search = builder.get_object("btn_search")
search.connect("clicked", self._on_search_clicked)
# Get option checkboxes
self.chk_reading = builder.get_object("chk_reading")
self.chk_translation = builder.get_object("chk_translation")
# Get result text buffer
tag = tagtable.TaipanTagTable()
self.rbuf = gtk.TextBuffer(tag)
result = builder.get_object("txt_result")
result.set_buffer(self.rbuf)
result.connect("button_press_event", self._on_result_click)
result.connect("populate_popup", self._on_result_popup)
# Get expander and add recognizer to it
self.recognizer = tegakigtk.recognizer.SimpleRecognizerWidget()
self.recognizer.connect("commit-string", self._on_recognizer_commit)
self.exp_recognizer = builder.get_object("exp_recognize")
self.exp_recognizer.add(self.recognizer)
def search(self, what=None):
"""Do the dictionary search and display the nicely formatted result"""
# If the text was provided as an argument, update the searchbox
if what != None:
self.entry.set_text(what)
# don't bother to search for empty string
if self.entry.get_text() == '':
return
# search in characters (HeadWord)
res = self.dict.getForHeadword(unicode(self.entry.get_text()))
# search in reading (Pinyin)
if self.chk_reading.get_active():
res2 = self.dict.getForReading(unicode(self.entry.get_text()),
reading='Pinyin',
toneMarkType='numbers')
res = itertools.chain(res, res2)
# search in translation
if self.chk_translation.get_active():
res2 = self.dict.getForTranslation(unicode(self.entry.get_text()))
res = itertools.chain(res, res2)
# Display the result
self.rbuf.set_text('\n')
num_results = 0
for r in res:
num_results += 1
# Chinese
self.rbuf.insert_with_tags_by_name(self.rbuf.get_end_iter(),
r.HeadwordSimplified, "headword")
if r.HeadwordSimplified != r.HeadwordTraditional:
s = " (" + r.HeadwordTraditional + ")"
self.rbuf.insert_with_tags_by_name(self.rbuf.get_end_iter(),
s, "headword")
# Reading
self.rbuf.insert(self.rbuf.get_end_iter(), "\n[ ")
self._add_formatted_reading(r.Reading)
self.rbuf.insert(self.rbuf.get_end_iter(), " ]\n\n")
# Translation
s = r.Translation[1:-1].split('/')
basictrans = s[0] + "\n"
extended = ""
for i in range(1, min(len(s), 11)):
m = " " + unichr(12928+i-1) + " " + s[i] + "\n"
extended += m
for i in range(11, len(s)):
m = " (" + str(i) + ") " + s[i] + "\n"
extended += m
self._add_text_with_readings(basictrans, ["basictrans"])
self._add_text_with_readings(extended)
self.rbuf.insert(self.rbuf.get_end_iter(), "\n\n")
# Display an error message if the given expression was not found
if num_results == 0:
self.rbuf.set_text("\nExpression '"
+ unicode(self.entry.get_text())
+ "' was not found in the dictionary!")
def _add_text_with_readings(self, text, tags=[]):
"""Find readings in the text and format them properly"""
# add reading blocks and plaintext before them
last = 0
for match in re.finditer('\[(.*)\]', text):
s = match.start(1)
e = match.end(1)
rd = self.reading.convert(match.group(1), self.dict.READING,
self.dict.READING,
sourceOptions=self.dict.READING_OPTIONS)
self.rbuf.insert_with_tags_by_name(self.rbuf.get_end_iter(),
text[last:s], *tags)
self._add_formatted_reading(rd, tags)
last = e
# append final part
self.rbuf.insert_with_tags_by_name(self.rbuf.get_end_iter(),
text[last:], *tags)
def _add_formatted_reading(self, reading, tags=[]):
"""Split reading string to syllables and add them with proper
style according to tone"""
decomp = self.reading.decompose(reading, 'Pinyin')
for ent in decomp:
if self.reading.isReadingEntity(ent, 'Pinyin'):
foo,tone = self.reading.splitEntityTone(ent, 'Pinyin')
if tone == 1:
self.rbuf.insert_with_tags_by_name(
self.rbuf.get_end_iter(), ent, "tone1", *tags)
elif tone == 2:
self.rbuf.insert_with_tags_by_name(
self.rbuf.get_end_iter(), ent, "tone2", *tags)
elif tone == 3:
self.rbuf.insert_with_tags_by_name(
self.rbuf.get_end_iter(), ent, "tone3", *tags)
elif tone == 4:
self.rbuf.insert_with_tags_by_name(
self.rbuf.get_end_iter(), ent, "tone4", *tags)
else:
self.rbuf.insert_with_tags_by_name(
self.rbuf.get_end_iter(), ent, "tonenull", *tags)
else:
self.rbuf.insert_with_tags_by_name(
self.rbuf.get_end_iter(), ent, *tags)
def _on_entry_keypress(self, widget, event):
"""Do dictionary search when RETURN was pressed inside the search box
"""
if(event.keyval == gtk.keysyms.Return):
self.search()
return False
def _on_entry_changed(self, widget):
"""Update popup completition whenever searchbox contents is changed"""
# Wildcard search for empty string is evil
if len(self.entry.get_text()) == 0:
return False
# Get matching items from dictionary and update the model
res = self.dict.getForHeadword(unicode(self.entry.get_text())+'*')
self.compl_model.clear()
for r in res:
s = r.HeadwordSimplified
if r.HeadwordSimplified != r.HeadwordTraditional:
s += " (" + r.HeadwordTraditional + ")"
s += " [" + r.Reading + "]"
self.compl_model.append([s, r.HeadwordSimplified])
return False
def _on_compl_match_selected(self, completion, model, row):
"""When an item from popup completition was selected, update
the search box with appropriate value"""
self.entry.set_text(model[row][1])
self.search()
return True
def _on_search_clicked(self, widget):
"""Do dictionary search when Search button was clicked"""
self.search()
def _on_result_click(self, widget, event):
"""If a CJK character was under the mouse pointer in the moment
of right-click, save the character for popup menu purposes"""
self.sorder_to_popup = None
# Right-click check
if event.button != 3:
return False
# Get the character under the mouse pointer
x,y = widget.window_to_buffer_coords(gtk.TEXT_WINDOW_TEXT,
int(event.x), int(event.y))
start = widget.get_iter_at_position(x, y)
s, e = start[0], start[0].copy()
e.forward_char()
char = s.get_text(e)
# If the character is not an CJK character, don't do anything
if not self.cjk.isCharacterInDomain(char):
return False
self.sorder_to_popup = char
return False
def _on_result_popup(self, widget, menu):
"""If a CJK character was targeted, add 'Show stroke order' item to
the popup menu"""
if self.sorder_to_popup != None:
menu_sorder = gtk.MenuItem( "Show stroke order")
menu_sorder.connect("activate", self._on_sorder_activate,
self.sorder_to_popup)
menu_sorder.show()
menu.prepend(menu_sorder)
return False
def _on_sorder_activate(self, widget, char):
"""Display stroke order animation window when "Show stroke order"
context menu item was activated"""
anim = sorder.StrokeOrderAnimation(char)
anim.start()
def _on_recognizer_commit(self, widget, char):
"""When a character from the recognizer was selected, add it to the
searchbox"""
self.entry.set_text(self.entry.get_text() + char)
self.search()
self.recognizer.clear_all()
def run():
"""Initialize the GUI"""
window = gtk.Window()
dictionary = DictionaryWidget()
window.add(dictionary)
window.connect("destroy", gtk.main_quit)
window.set_size_request(350,700)
window.set_title("Tajpan dictionary")
window.show_all()
gtk.gdk.threads_init()
gtk.gdk.threads_enter()
gtk.main()
gtk.gdk.threads_leave()
# If directly called, start the GUI
if __name__ == "__main__":
run()
| gpl-3.0 | -4,708,782,873,489,502,000 | 38.551839 | 78 | 0.568155 | false | 3.914598 | false | false | false |
erikriver/eduIntelligent-cynin | src/ubify.viewlets/ubify/viewlets/browser/addnewmenu.py | 1 | 10384 |
###############################################################################
#cyn.in is an open source Collaborative Knowledge Management Appliance that
#enables teams to seamlessly work together on files, documents and content in
#a secure central environment.
#
#cyn.in v2 an open source appliance is distributed under the GPL v3 license
#along with commercial support options.
#
#cyn.in is a Cynapse Invention.
#
#Copyright (C) 2008 Cynapse India Pvt. Ltd.
#
#This program is free software: you can redistribute it and/or modify it under
#the terms of the GNU General Public License as published by the Free Software
#Foundation, either version 3 of the License, or any later version and observe
#the Additional Terms applicable to this program and must display appropriate
#legal notices. In accordance with Section 7(b) of the GNU General Public
#License version 3, these Appropriate Legal Notices must retain the display of
#the "Powered by cyn.in" AND "A Cynapse Invention" logos. You should have
#received a copy of the detailed Additional Terms License with this program.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
#Public License for more details.
#
#You should have received a copy of the GNU General Public License along with
#this program. If not, see <http://www.gnu.org/licenses/>.
#
#You can contact Cynapse at [email protected] with any problems with cyn.in.
#For any queries regarding the licensing, please send your mails to
# [email protected]
#
#You can also contact Cynapse at:
#802, Building No. 1,
#Dheeraj Sagar, Malad(W)
#Mumbai-400064, India
###############################################################################
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from plone.app.layout.viewlets.common import ViewletBase
from zope.component import getMultiAdapter
from Products.CMFCore.utils import getToolByName
from zope.component import getUtility,getAdapters
from zope.app.publisher.interfaces.browser import IBrowserMenu
from ubify.policy.config import spacesdefaultaddablenonfolderishtypes, spacesdefaultaddableforfolders
from Acquisition import aq_inner, aq_base, aq_parent
from ubify.cyninv2theme import checkHasPermission, getRootID, getLocationListForAddContent, canAddContent, getBestMatchedLocationForAddingContent, getDisallowedTypes
class AddNewMenuViewlet(ViewletBase):
render = ViewPageTemplateFile('addnew_menu.pt')
def getAddMenuItems(self,portal,id):
objlist = []
try:
objMenu = getattr(portal,id)
menu = getUtility(IBrowserMenu, name='plone_contentmenu_factory')
newmenu = menu.getMenuItems(objMenu,self.request)
for ob in newmenu:
if ob['extra']['id'] <> '_settings' and ob['extra']['id'] <> 'settings':
if id == 'views' and ob.has_key('id'):
if ob.has_key('absolute_url') == False:
ob['absolute_url'] = ob['action']
if ob.has_key('Title') == False:
ob['Title'] = ob['title']
if ob.has_key('portal_type') == False:
ob['portal_type'] = ob['id']
objlist.append(ob)
except AttributeError:
pass
return objlist
def update(self):
self.addnewitems = []
self.viewmenu = []
self.currentcontextmenu = []
self.currentcontexttitle = ''
self.contextualurl = ''
self.contextuid = ''
self.contextdisallowedtypes = []
portal_state = getMultiAdapter((self.context, self.request),name=u'plone_portal_state')
context_state = getMultiAdapter((self.context, self.request),name=u'plone_context_state')
typetool= getToolByName(self.context, 'portal_types')
object_typename = self.context.portal_type
portal = portal_state.portal()
self.spaceslist = getLocationListForAddContent(portal)
self.viewmenu = self.getAddMenuItems(portal,'views')
self.anonymous = portal_state.anonymous()
if not self.anonymous:
for eachtype in spacesdefaultaddablenonfolderishtypes:
object_typeobj = typetool[eachtype]
if object_typeobj <> None:
self.addnewitems.append({'id': object_typeobj.id,
'title': object_typeobj.Title(),
'description':object_typeobj.Description(),
'icon': object_typeobj.content_icon})
if self.context.portal_type == 'Folder':
self.addnewitems = []
for eachtype in spacesdefaultaddableforfolders:
object_typeobj = typetool[eachtype]
if object_typeobj <> None:
self.addnewitems.append({'id': object_typeobj.id,
'title': object_typeobj.Title(),
'description':object_typeobj.Description(),
'icon': object_typeobj.content_icon})
self.addnewcontainers = []
containers = ['ContentSpace', 'Course', 'Folder']
for eachtype in containers:
object_typeobj = typetool[eachtype]
if object_typeobj <> None:
self.addnewcontainers.append({'id': object_typeobj.id,
'title': object_typeobj.Title(),
'description':object_typeobj.Description(),
'icon': object_typeobj.content_icon})
self.contenidoeducativo = []
containers = ['Exam', 'Quiz', 'ZipContent', 'SCO']
for eachtype in containers:
object_typeobj = typetool[eachtype]
if object_typeobj <> None:
self.contenidoeducativo.append({'id': object_typeobj.id,
'title': object_typeobj.Title(),
'description':object_typeobj.Description(),
'icon': object_typeobj.content_icon})
self.addnewitems.sort(lambda x,y: cmp(x['title'].lower(),y['title'].lower()))
menu = getUtility(IBrowserMenu, name='plone_contentmenu_factory')
if object_typename in ('RecycleBin',):
self.currentcontextmenu = []
elif object_typename in ('Plone Site',):
#get root object and check for it
objRoot = getattr(portal,getRootID())
if checkHasPermission('Add portal content', aq_inner(objRoot)):
self.currentcontextmenu = menu.getMenuItems(objRoot,self.request)
self.contextualurl = aq_inner(objRoot).absolute_url()
self.currentcontexttitle = objRoot.Title()
self.contextuid = objRoot.UID()
self.contextdisallowedtypes = objRoot.disallowedtypes()
else:
if object_typename in ('ContentRoot','ContentSpace', 'Course','Folder') and self.context.isPrincipiaFolderish and checkHasPermission('Add portal content',aq_inner(self.context)):
self.currentcontextmenu = menu.getMenuItems(self.context, self.request)
self.contextualurl = aq_inner(self.context).absolute_url()
if object_typename in ('ContentRoot','ContentSpace','Course','Folder'):
self.currentcontexttitle = context_state.object_title()
self.contextuid = aq_inner(self.context).UID()
self.contextdisallowedtypes = (aq_inner(self.context)).disallowedtypes()
else:
currentobject = aq_inner(self.context)
parentList = currentobject.aq_chain
parentspace = None
found = 0
try:
for type in parentList:
if type.portal_type in ('ContentRoot','ContentSpace','Course'):
parentspace = type
if checkHasPermission('Add portal content',aq_inner(parentspace)):
found = 1
if found == 1:
break
except AttributeError:
parentspace = None
pass
if parentspace <> None:
self.currentcontextmenu = menu.getMenuItems(aq_inner(parentspace),self.request)
self.currentcontexttitle = parentspace.Title()
self.contextualurl = parentspace.absolute_url()
self.contextuid = parentspace.UID()
self.contextdisallowedtypes = parentspace.disallowedtypes()
#strip out 'settings' item(s)
self.currentcontextmenu = [ob for ob in self.currentcontextmenu if ob['extra']['id'] <> 'settings' and ob['extra']['id'] <> '_settings']
if self.contextuid == '':
#best match element is brain
bestmatchedspace = getBestMatchedLocationForAddingContent(portal)
if bestmatchedspace:
self.currentcontexttitle = bestmatchedspace.Title
self.contextuid = bestmatchedspace.UID
self.contextualurl = bestmatchedspace.getURL()
self.contextdisallowedtypes = bestmatchedspace.disallowedtypes
def icon(self, action):
icon = action.get('icon', None)
if icon is None:
icon = self.getIconFor('content_actions', action['id'])
return icon
| gpl-3.0 | 952,695,614,535,909,400 | 50.92 | 210 | 0.562307 | false | 4.650246 | false | false | false |
bruth/restlib2 | restlib2/params.py | 1 | 5458 |
import logging
import warnings
import collections
from six import add_metaclass
from functools import partial
logger = logging.getLogger(__name__)
class Param(object):
"Describes a single parameter and defines a method for cleaning inputs."
def __init__(self, default=None, allow_list=False, description=None, param_key=None, choices=None, **kwargs):
self.default = default
self.allow_list = allow_list
self.description = description
self.param_key = param_key
self.choices = choices
for key in kwargs:
setattr(self, key, kwargs[key])
def clean(self, value, *args, **kwargs):
if self.choices and value not in self.choices:
raise ValueError('"{0}" not a valid choice'.format(value))
return value
def clean_list(self, values, *args, **kwargs):
return [self.clean(x, *args, **kwargs) for x in values]
class IntParam(Param):
def clean(self, value, *args, **kwargs):
return super(IntParam, self).clean(int(value), *args, **kwargs)
class FloatParam(Param):
def clean(self, value, *args, **kwargs):
return super(FloatParam, self).clean(float(value), *args, **kwargs)
class StrParam(Param):
def __init__(self, *args, **kwargs):
kwargs.setdefault('strip', True)
super(StrParam, self).__init__(*args, **kwargs)
def clean(self, value, *args, **kwargs):
value = str(value)
if self.strip:
value = value.strip()
return super(StrParam, self).clean(value, *args, **kwargs)
class UnicodeParam(StrParam):
def clean(self, value, *args, **kwargs):
value = str(value)
if self.strip:
value = value.strip()
return super(UnicodeParam, self).clean(value, *args, **kwargs)
class BoolParam(Param):
def __init__(self, *args, **kwargs):
kwargs.setdefault('true_values', ('t', 'true', '1', 'yes'))
kwargs.setdefault('false_values', ('f', 'false', '0', 'no'))
super(BoolParam, self).__init__(*args, **kwargs)
def clean(self, value, *args, **kwargs):
value = value.lower()
if value in self.true_values:
value = True
elif value in self.false_values:
value = False
else:
raise ValueError
return super(BoolParam, self).clean(value, *args, **kwargs)
class ParametizerMetaclass(type):
def __new__(cls, name, bases, attrs):
new_cls = type.__new__(cls, name, bases, attrs)
fields = getattr(new_cls, '_fields', {}).copy()
defaults = getattr(new_cls, '_defaults', {}).copy()
if hasattr(new_cls, 'param_defaults'):
warnings.warn('Resource.param_defaults has been deprecated', DeprecationWarning)
defaults.update(new_cls.param_defaults)
for attr, value in attrs.items():
if not isinstance(value, collections.Callable) and not attr.startswith('_'):
# Wrap shorthand definition in param class
if isinstance(value, Param):
field = value
key = field.param_key or attr
value = field.default
else:
key = attr
field = Param(default=value)
clean_method = 'clean_{0}'.format(attr)
# Partially apply the clean method with the field as self
if clean_method in attrs:
field.clean = partial(attrs[clean_method], field)
fields[key] = field
defaults[key] = value
new_cls._fields = fields
new_cls._defaults = defaults
return new_cls
@add_metaclass(ParametizerMetaclass)
class Parametizer(object):
def clean(self, params=None, defaults=None):
if params is None:
params = {}
param_defaults = self._defaults.copy()
if defaults is not None:
param_defaults.update(defaults)
cleaned = {}
# Gather both sets of keys since there may be methods defined
# without a default value specified.
keys = set(list(param_defaults.keys()) + list(params.keys()))
for key in keys:
# Add the default value for non-existant keys in params
if key not in params:
cleaned[key] = param_defaults[key]
continue
# Get associated param instance or initialize default one
field = self._fields.get(key, Param())
# Support MultiValueDict (request.GET and POST)
if field.allow_list and hasattr(params, 'getlist'):
value = params.getlist(key)
else:
value = params.get(key)
# If any kind of error occurs while cleaning, revert to
# the default value
try:
if isinstance(value, (list, tuple)):
value = field.clean_list(value)
if not field.allow_list:
value = value[0]
else:
value = field.clean(value)
except Exception as e:
logger.debug('Error cleaning parameter: {0}'.format(e), extra={
'key': key,
'value': value,
})
value = param_defaults.get(key, value)
cleaned[key] = value
return cleaned
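# Example usage (illustrative only; the parameter names below are hypothetical):
#
#   class SearchParams(Parametizer):
#       query = StrParam('')
#       page = IntParam(1)
#       per_page = 20
#
#       def clean_per_page(self, value):
#           return min(int(value), 100)
#
#   SearchParams().clean({'page': '2', 'per_page': '500'})
#   # => {'query': '', 'page': 2, 'per_page': 100}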
| bsd-2-clause | -2,967,277,310,874,502,700 | 32.484663 | 113 | 0.562294 | false | 4.328311 | false | false | false |
UI-DataScience/summer2014 | hw5/FirstName-LastName-twittercloud.py | 1 | 4434 |
#!/usr/bin/python
# Week 5 problem 3. Twitter.
# Do not delete the comments.
# Do not change the function names; do not change the input parameters.
# Do not change the return types of the functions.
# Your code goes in the part where it says your code goes here.
# Do not change anything else other than the part where it says your code goes here.
# Most of the code below is copied verbatim or modified slightly
# from the book Mining the Social Web, 2nd Edition, by Matthew A. Russell
from __future__ import print_function
import re
import twitter
import pandas as pd
import os
import pickle
from pytagcloud import create_tag_image, make_tags
def search_twitter(twitter_api, q, search_size = 100, stop_count = 1000):
'''
Modified from Example 1-5 in Mining the Social Web 2nd Edition.
Returns statuses, a list of dictionaries of twitter metadata.
Parameters:
twitter_api: Use twitter.Twitter to create twitter.api.Twitter object.
q (str): search query (e.g. #informatics)
search_size: default 100.
stop_count: stops searching once the total number of fetched tweets reaches stop_count.
'''
# See https://dev.twitter.com/docs/api/1.1/get/search/tweets
search_results = twitter_api.search.tweets(q = q, count = search_size)
statuses = search_results['statuses']
# Iterate through results by following the cursor until we hit the count number
while stop_count > len(statuses):
try:
next_results = search_results['search_metadata']['next_results']
except KeyError, e: # No more results when next_results doesn't exist
break
# Create a dictionary from next_results, which has the following form:
# ?max_id=313519052523986943&q=NCAA&include_entities=1
kwargs = dict([ kv.split('=') for kv in next_results[1:].split("&") ])
next_results = twitter_api.search.tweets(**kwargs)
statuses += next_results['statuses']
print(len(statuses), 'tweets fetched...')
return statuses
def clean_statuses(statuses):
'''
Takes a list of dictionaries of tweet metadata returned from
search_twitter() function, and returns a list with all lowercase words
(no words with #, @, http, or non-alphabetical characters).
Parameters:
statuses: a list of dictionaries of tweet metadata returned from
search_twitter() function.
'''
status_texts = [status['text'] for status in statuses]
status_texts = [text.encode('ascii', 'ignore') for text in status_texts]
clean_tweets = []
# your code goes here
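# One possible solution (illustrative sketch): keep lowercase, purely
# alphabetical words and skip hashtags, mentions and links.
for text in status_texts:
    for word in text.split():
        if word.startswith('#') or word.startswith('@') or 'http' in word:
            continue
        word = word.lower()
        if word.isalpha():
            clean_tweets.append(word)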
return clean_tweets
def get_counts(words):
'''
Takes a list of strings and returns a list of tuples (string, int).
Parameters:
words: a list of strings
Examples:
>>> get_counts(['a', 'a', 'b', 'b', 'b', 'c'])
[('b', 3), ('a', 2), ('c', 1)]
'''
# your code goes here
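# One possible solution (illustrative sketch): count each word, then sort by
# frequency in descending order, matching the docstring example.
freq = {}
for word in words:
    freq[word] = freq.get(word, 0) + 1
counts = sorted(freq.items(), key=lambda kv: kv[1], reverse=True)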
return counts
def main():
# XXX: Go to http://dev.twitter.com/apps/new to create an app and get values
# for these credentials, which you'll need to provide in place of these
# empty string values that are defined as placeholders.
# See https://dev.twitter.com/docs/auth/oauth for more information
# on Twitter's OAuth implementation.
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
OAUTH_TOKEN = ''
OAUTH_TOKEN_SECRET = ''
auth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET,
CONSUMER_KEY, CONSUMER_SECRET)
twitter_api = twitter.Twitter(auth = auth)
# Search query, try your own.
q = '#informatics'
# calling search_twitter too often will lock you out for 1 hour.
# we will call search_twitter once and save the result in a file.
if not os.path.isfile('{0}.p'.format(q)):
results = search_twitter(twitter_api, q)
pickle.dump(results, open('{0}.p'.format(q), 'wb'))
# load saved pickle file
results = pickle.load(open('{0}.p'.format(q), 'rb'))
# clean the tweets and extract the words we want
clean_tweets = clean_statuses(results)
# calculate the frequency of each word
word_count = get_counts(clean_tweets)
# use PyTagCloud to create a tag cloud
tags = make_tags(word_count, maxsize = 120)
# the image is stored in 'cloud.png'
create_tag_image(tags, 'cloud.png', size = (900, 600), fontname = 'Lobster')
if __name__ == '__main__':
main() | mit | -2,413,207,903,548,857,300 | 33.379845 | 84 | 0.656067 | false | 3.815835 | false | false | false |
T2DREAM/t2dream-portal | src/encoded/commands/generate_ontology.py | 1 | 28527 |
from rdflib import ConjunctiveGraph, exceptions, Namespace
from rdflib import RDFS, RDF, BNode
from rdflib.collection import Collection
import json
EPILOG = __doc__
OWLNS = Namespace("http://www.w3.org/2002/07/owl#")
OBO_OWL = Namespace("http://www.geneontology.org/formats/oboInOwl#")
EFO = Namespace("http://www.ebi.ac.uk/efo/")
OBO = Namespace("http://purl.obolibrary.org/obo/")
EFO_Synonym = EFO["alternative_term"]
OBO_Synonym = OBO["IAO_0000118"]
Synonym = OBO_OWL["hasExactSynonym"]
Ontology = OWLNS["Ontology"]
Restriction = OWLNS["Restriction"]
Class = OWLNS["Class"]
Thing = OWLNS["Thing"]
OnProperty = OWLNS["onProperty"]
SomeValuesFrom = OWLNS["someValuesFrom"]
IntersectionOf = OWLNS["intersectionOf"]
PART_OF = "http://purl.obolibrary.org/obo/BFO_0000050"
DEVELOPS_FROM = "http://purl.obolibrary.org/obo/RO_0002202"
HUMAN_TAXON = "http://purl.obolibrary.org/obo/NCBITaxon_9606"
HAS_PART = "http://purl.obolibrary.org/obo/BFO_0000051"
ACHIEVES_PLANNED_OBJECTIVE = "http://purl.obolibrary.org/obo/OBI_0000417"
DEFAULT_LANGUAGE = "en"
developental_slims = {
'UBERON:0000926': 'mesoderm',
'UBERON:0000924': 'ectoderm',
'UBERON:0000925': 'endoderm'
}
system_slims = {
'UBERON:0000383': 'musculature of body',
'UBERON:0000949': 'endocrine system',
'UBERON:0000990': 'reproductive system',
'UBERON:0001004': 'respiratory system',
'UBERON:0001007': 'digestive system',
'UBERON:0001008': 'excretory system',
'UBERON:0001009': 'circulatory system',
'UBERON:0001434': 'skeletal system',
'UBERON:0002405': 'immune system',
'UBERON:0002416': 'integumental system',
'UBERON:0001032': 'sensory system',
'UBERON:0001017': 'central nervous system',
'UBERON:0000010': 'peripheral nervous system'
}
organ_slims = {
'UBERON:0002369': 'adrenal gland',
'UBERON:0002110': 'gallbladder',
'UBERON:0002106': 'spleen',
'UBERON:0001043': 'esophagus',
'UBERON:0000004': 'nose',
'UBERON:0000056': 'ureter',
'UBERON:0000057': 'urethra',
'UBERON:0000059': 'large intestine',
'UBERON:0000165': 'mouth',
'UBERON:0000945': 'stomach',
'UBERON:0000948': 'heart',
'UBERON:0000955': 'brain',
'UBERON:0000970': 'eye',
'UBERON:0000991': 'gonad',
'UBERON:0001255': 'urinary bladder',
'UBERON:0001264': 'pancreas',
'UBERON:0001474': 'bone element',
'UBERON:0002048': 'lung',
'UBERON:0002097': 'skin of body',
'UBERON:0002107': 'liver',
'UBERON:0002108': 'small intestine',
'UBERON:0002113': 'kidney',
'UBERON:0002240': 'spinal cord',
'UBERON:0002367': 'prostate gland',
'UBERON:0002370': 'thymus',
'UBERON:0003126': 'trachea',
'UBERON:0001723': 'tongue',
'UBERON:0001737': 'larynx',
'UBERON:0006562': 'pharynx',
'UBERON:0001103': 'diaphragm',
'UBERON:0002185': 'bronchus',
'UBERON:0000029': 'lymph node',
'UBERON:0001132': 'parathyroid gland',
'UBERON:0002046': 'thyroid gland',
'UBERON:0001981': 'blood vessel',
'UBERON:0001473': 'lymphatic vessel',
'UBERON:0000178': 'blood',
'UBERON:0007844': 'cartilage element',
'UBERON:0001690': 'ear',
'UBERON:0001987': 'placenta',
'UBERON:0001911': 'mammary gland',
'UBERON:0001630': 'muscle organ',
'UBERON:0000007': 'pituitary gland',
'UBERON:0016887': 'extraembryonic component',
'UBERON:0001013': 'adipose tissue',
'UBERON:0000310': 'breast',
'UBERON:0000989': 'penis',
'UBERON:0004288': 'skeleton',
'UBERON:0000995': 'uterus',
'UBERON:0000996': 'vagina',
'UBERON:0000992': 'ovary',
'UBERON:0000473': 'testis',
'UBERON:0001637': 'artery',
'UBERON:0001638': 'vein',
'UBERON:0002050': 'embryonic structure',
'UBERON:0000160': 'intestine',
'UBERON:0002384': 'connective tissue'
}
assay_slims = {
# Note shortened synonyms are provided
'OBI:0000634': 'DNA methylation', # 'DNA methylation profiling'
'OBI:0000424': 'Transcription', # 'transcription profiling'
'OBI:0001398': 'DNA binding', # "protein and DNA interaction"
'OBI:0001854': 'RNA binding', # "protein and RNA interaction"
'OBI:0001917': '3D chromatin structure', # 'chromosome conformation identification objective'
'OBI:0000870': 'DNA accessibility', # 'single-nucleotide-resolution nucleic acid structure mapping assay'
'OBI:0001916': 'Replication timing',
'OBI:0000435': 'Genotyping',
'OBI:0000615': 'Proteomics',
}
slim_shims = {
# this allows us to manually assign term X to slim Y while waiting for ontology updates
'assay': {
# DNA accessibility
'OBI:0001924': 'DNA accessibility', # 'OBI:0000870' / MNase-seq
'OBI:0002039': 'DNA accessibility', # 'OBI:0000870', / ATAC-seq
'OBI:0001853': 'DNA accessibility', # 'OBI:0000870', / DNase-seq
'OBI:0001859': 'DNA accessibility', # 'OBI:0000870', / OBI:0000424 / FAIRE-seq
'OBI:0002042': '3D chromatin structure', # 'OBI:0000870' (Hi-C)
'OBI:0001848': '3D chromatin structure', # ChIA-PET / OBI:000870
'OBI:0001923': 'Proteomics', # OBI:0000615': 'MS-MS'
'OBI:0001849': 'Genotyping', # OBI:0000435 (DNA-PET)
'OBI:0002044': 'RNA binding', # OBI:0001854 (RNA-Bind-N-Seq)
'OBI:0002091': 'Transcription',
'OBI:0002092': 'Transcription',
'OBI:0002093': 'Transcription'
}
}
preferred_name = {
"OBI:0000626": "WGS",
"OBI:0001247": "genotyping HTS",
"OBI:0001332": "DNAme array",
"OBI:0001335": "microRNA counts",
"OBI:0001463": "RNA microarray",
"OBI:0001863": "WGBS",
"OBI:0001923": "MS-MS",
"OBI:0001271": "RNA-seq",
"OBI:0000716": "ChIP-seq",
"OBI:0001853": "DNase-seq",
"OBI:0001920": "Repli-seq",
"OBI:0001864": "RAMPAGE",
"OBI:0001393": "genotyping array",
"OBI:0002042": "Hi-C",
}
category_slims = {
'OBI:0000634': 'DNA methylation profiling',
'OBI:0000424': 'transcription profiling',
'OBI:0000435': 'genotyping',
'OBI:0000615': 'proteomics',
'OBI:0001916': 'replication',
'OBI:0001398': "protein and DNA interaction",
'OBI:0001854': "protein and RNA interaction"
}
objective_slims = {
'OBI:0000218': 'cellular feature identification objective',
'OBI:0001691': 'cellular structure feature identification objective',
'OBI:0001916': 'DNA replication identification objective',
'OBI:0001917': 'chromosome conformation identification objective',
'OBI:0001234': 'epigenetic modification identification objective',
'OBI:0001331': 'transcription profiling identification objective',
'OBI:0001690': 'molecular function identification objective',
'OBI:0000268': 'organism feature identification objective',
'OBI:0001623': 'organism identification objective',
'OBI:0001398': 'protein and DNA interaction identification objective',
'OBI:0001854': 'protein and RNA interaction identification objective'
}
type_slims = {
'OBI:0001700': 'immunoprecipitation assay',
'OBI:0000424': 'transcription profiling assay',
'OBI:0000634': 'DNA methylation profiling assay',
'OBI:0000435': 'genotyping assay'
}
# Note this also shows the final datastructure for ontology.json
ntr_assays = {
"NTR:0003660": {
"assay": ['Transcription'],
"category": [],
"developmental": [],
"name": "microRNA counts",
"objectives": [],
"organs": [],
"preferred_name": "",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0000612": {
"assay": ['RNA binding'],
"category": [],
"developmental": [],
"name": "Switchgear",
"objectives": [],
"organs": [],
"preferred_name": "",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0000762": {
"assay": ['Transcription'],
"category": [],
"developmental": [],
"name": "shRNA knockdown followed by RNA-seq",
"objectives": [],
"organs": [],
"preferred_name": "shRNA RNA-seq",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0000763": {
"assay": ['Transcription'],
"category": [],
"developmental": [],
"name": "siRNA knockdown followed by RNA-seq",
"objectives": [],
"organs": [],
"preferred_name": "siRNA RNA-seq",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0001132": {
"assay": ['RNA binding'],
"category": [],
"developmental": [],
"name": "RNA Bind-N-Seq",
"objectives": [],
"organs": [],
"preferred_name": "RNA Bind-N-Seq",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0003082": {
"assay": ['Transcription'],
"category": [],
"developmental": [],
"name": "single cell isolation followed by RNA-seq",
"objectives": [],
"organs": [],
"preferred_name": "single cell RNA-seq",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0004774": {
"assay": ['DNA accessibility'],
"category": [],
"developmental": [],
"name": "genetic modification followed by DNase-seq",
"objectives": [],
"organs": [],
"preferred_name": "genetic modification DNase-seq",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0003814": {
"assay": ['Transcription'],
"category": [],
"developmental": [],
"name": "CRISPR genome editing followed by RNA-seq",
"objectives": [],
"organs": [],
"preferred_name": "CRISPR RNA-seq",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0004619": {
"assay": ['Transcription'],
"category": [],
"developmental": [],
"name": "CRISPRi followed by RNA-seq",
"objectives": [],
"organs": [],
"preferred_name": "CRISPRi RNA-seq",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0000438": {
"assay": ['DNA accessibility'],
"category": [],
"developmental": [],
"name": "single-nuclei ATAC-seq",
"objectives": [],
"organs": [],
"preferred_name": "snATAC-seq",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0000444": {
"assay": ['DNA accessibility'],
"category": [],
"developmental": [],
"name": "single-cell ATAC-seq",
"objectives": [],
"organs": [],
"preferred_name": "scATAC-seq",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0004875": {
"assay": ['Genotyping'],
"category": [],
"developmental": [],
"name": "genotyping by Hi-C",
"objectives": [],
"organs": [],
"preferred_name": "genotyping Hi-C",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
}
}
class Inspector(object):
""" Class that includes methods for querying an RDFS/OWL ontology """
def __init__(self, uri, language=""):
super(Inspector, self).__init__()
self.rdfGraph = ConjunctiveGraph()
try:
self.rdfGraph.parse(uri, format="application/rdf+xml")
except:
try:
self.rdfGraph.parse(uri, format="n3")
except:
raise exceptions.Error("Could not parse the file! Is it a valid RDF/OWL ontology?")
finally:
self.baseURI = self.get_OntologyURI() or uri
self.allclasses = self.__getAllClasses(includeDomainRange=True, includeImplicit=True, removeBlankNodes=False, excludeRDF_OWL=False)
def get_OntologyURI(self, return_as_string=True):
test = [x for x, y, z in self.rdfGraph.triples((None, RDF.type, Ontology))]
if test:
if return_as_string:
return str(test[0])
else:
return test[0]
else:
return None
def __getAllClasses(self, classPredicate="", includeDomainRange=False, includeImplicit=False, removeBlankNodes=True, addOWLThing=True, excludeRDF_OWL=True):
rdfGraph = self.rdfGraph
exit = {}
def addIfYouCan(x, mydict):
if excludeRDF_OWL:
if x.startswith('http://www.w3.org/2002/07/owl#') or \
x.startswith("http://www.w3.org/1999/02/22-rdf-syntax-ns#") or \
x.startswith("http://www.w3.org/2000/01/rdf-schema#"):
return mydict
if x not in mydict:
mydict[x] = None
return mydict
if addOWLThing:
exit = addIfYouCan(Thing, exit)
if classPredicate == "rdfs" or classPredicate == "":
for s in rdfGraph.subjects(RDF.type, RDFS.Class):
exit = addIfYouCan(s, exit)
if classPredicate == "owl" or classPredicate == "":
for s in rdfGraph.subjects(RDF.type, Class):
exit = addIfYouCan(s, exit)
if includeDomainRange:
for o in rdfGraph.objects(None, RDFS.domain):
exit = addIfYouCan(o, exit)
for o in rdfGraph.objects(None, RDFS.range):
exit = addIfYouCan(o, exit)
if includeImplicit:
for s, v, o in rdfGraph.triples((None, RDFS.subClassOf, None)):
exit = addIfYouCan(s, exit)
exit = addIfYouCan(o, exit)
for o in rdfGraph.objects(None, RDF.type):
exit = addIfYouCan(o, exit)
# get a list
exit = exit.keys()
if removeBlankNodes:
exit = [x for x in exit if not isBlankNode(x)]
return sort_uri_list_by_name(exit)
def __getTopclasses(self, classPredicate=''):
returnlist = []
for eachclass in self.__getAllClasses(classPredicate):
x = self.get_classDirectSupers(eachclass)
if not x:
returnlist.append(eachclass)
return sort_uri_list_by_name(returnlist)
def __getTree(self, father=None, out=None):
if not father:
out = {}
topclasses = self.toplayer
out[0] = topclasses
for top in topclasses:
children = self.get_classDirectSubs(top)
out[top] = children
for potentialfather in children:
self.__getTree(potentialfather, out)
return out
else:
children = self.get_classDirectSubs(father)
out[father] = children
for ch in children:
self.__getTree(ch, out)
def __buildClassTree(self, father=None, out=None):
if not father:
out = {}
topclasses = self.toplayer
out[0] = [Thing]
out[Thing] = sort_uri_list_by_name(topclasses)
for top in topclasses:
children = self.get_classDirectSubs(top)
out[top] = sort_uri_list_by_name(children)
for potentialfather in children:
self.__buildClassTree(potentialfather, out)
return out
else:
children = self.get_classDirectSubs(father)
out[father] = sort_uri_list_by_name(children)
for ch in children:
self.__buildClassTree(ch, out)
    # methods for getting ancestors and descendants of classes: by default, we do not include blank nodes
def get_classDirectSupers(self, aClass, excludeBnodes=True, sortUriName=False):
returnlist = []
for o in self.rdfGraph.objects(aClass, RDFS.subClassOf):
if not (o == Thing):
if excludeBnodes:
if not isBlankNode(o):
returnlist.append(o)
else:
returnlist.append(o)
if sortUriName:
return sort_uri_list_by_name(remove_duplicates(returnlist))
else:
return remove_duplicates(returnlist)
def get_classDirectSubs(self, aClass, excludeBnodes=True):
returnlist = []
for s, v, o in self.rdfGraph.triples((None, RDFS.subClassOf, aClass)):
if excludeBnodes:
if not isBlankNode(s):
returnlist.append(s)
else:
returnlist.append(s)
return sort_uri_list_by_name(remove_duplicates(returnlist))
def get_classSiblings(self, aClass, excludeBnodes=True):
returnlist = []
for father in self.get_classDirectSupers(aClass, excludeBnodes):
for child in self.get_classDirectSubs(father, excludeBnodes):
if child != aClass:
returnlist.append(child)
return sort_uri_list_by_name(remove_duplicates(returnlist))
def entitySynonyms(self, anEntity, language=DEFAULT_LANGUAGE, getall=True):
if getall:
temp = []
# Uberon synonyms
for o in self.rdfGraph.objects(anEntity, Synonym):
temp += [o]
# EFO synonyms
for o in self.rdfGraph.objects(anEntity, EFO_Synonym):
temp += [o]
# OBI synonyms
for o in self.rdfGraph.objects(anEntity, OBO_Synonym):
temp += [o]
return temp
else:
for o in self.rdfGraph.objects(anEntity, Synonym):
if getattr(o, 'language') and getattr(o, 'language') == language:
return o
return ""
def classFind(self, name, exact=False):
temp = []
if name:
for x in self.allclasses:
if exact:
if x.__str__().lower() == str(name).lower():
return [x]
else:
if x.__str__().lower().find(str(name).lower()) >= 0:
temp.append(x)
return temp
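# Illustrative usage sketch (not part of the original script): querying an
# Inspector once an ontology has been parsed. The ontology URL and the search
# string below are assumptions chosen purely for illustration.
def _example_inspect(url='http://purl.obolibrary.org/obo/uberon.owl'):
    insp = Inspector(url)
    for cls in insp.classFind('lung'):
        print('%s -> supers: %s' % (cls, insp.get_classDirectSupers(cls)))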
def inferNamespacePrefix(aUri):
stringa = aUri.__str__()
try:
prefix = stringa.replace("#", "").split("/")[-1]
except:
prefix = ""
return prefix
def sort_uri_list_by_name(uri_list):
def get_last_bit(uri_string):
try:
x = uri_string.split("#")[1]
except:
x = uri_string.split("/")[-1]
return x
try:
return sorted(uri_list, key=lambda x: get_last_bit(x.__str__()))
except:
# TODO: do more testing.. maybe use a unicode-safe method instead of __str__
print("Error in <sort_uri_list_by_name>: possibly a UnicodeEncodeError")
return uri_list
def remove_duplicates(seq, idfun=None):
if seq:
if idfun is None:
def idfun(x):
return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen:
continue
seen[marker] = 1
result.append(item)
return result
else:
return []
def isBlankNode(aClass):
''' Checks for blank node '''
if type(aClass) == BNode:
return True
else:
return False
def splitNameFromNamespace(aUri):
stringa = aUri.__str__()
try:
ns = stringa.split("#")[0]
name = stringa.split("#")[1]
except:
ns = stringa.rsplit("/", 1)[0]
name = stringa.rsplit("/", 1)[1]
return (name, ns)
def iterativeChildren(nodes, terms, closure):
if closure == 'data':
data = 'data'
else:
data = 'data_with_develops_from'
results = []
while 1:
newNodes = []
if len(nodes) == 0:
break
for node in nodes:
results.append(node)
if terms[node][data]:
for child in terms[node][data]:
if child not in results:
newNodes.append(child)
nodes = list(set(newNodes))
return list(set(results))
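# Small self-contained sketch (not part of the original script) showing how
# iterativeChildren() follows the 'data' links transitively: starting from the
# made-up term TERM:C it returns TERM:A, TERM:B and TERM:C.
def _example_iterative_children():
    toy_terms = {
        'TERM:A': {'data': [], 'data_with_develops_from': []},
        'TERM:B': {'data': ['TERM:A'], 'data_with_develops_from': ['TERM:A']},
        'TERM:C': {'data': ['TERM:B'], 'data_with_develops_from': ['TERM:B']},
    }
    return iterativeChildren(['TERM:C'], toy_terms, 'data')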
def getSlims(goid, terms, slimType):
''' Get Slims '''
slims = []
slimTerms = {}
if slimType == 'developmental':
slimTerms = developental_slims
elif slimType == 'organ':
slimTerms = organ_slims
elif slimType == 'system':
slimTerms = system_slims
elif slimType == 'assay':
slimTerms = assay_slims
elif slimType == 'category':
slimTerms = category_slims
elif slimType == 'objective':
slimTerms = objective_slims
elif slimType == 'type':
slimTerms = type_slims
for slimTerm in slimTerms:
if slimType == 'developmental':
if slimTerm in terms[goid]['closure_with_develops_from']:
slims.append(slimTerms[slimTerm])
else:
if slimTerm in terms[goid]['closure']:
slims.append(slimTerms[slimTerm])
if slim_shims.get(slimType, {}):
        # Overrides all ontology-based slims
shim = slim_shims[slimType].get(goid, '')
if shim:
slims = [shim]
return slims
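# Illustrative sketch (not part of the original script): getSlims() only checks
# which slim ids occur in a term's precomputed closure. The term id here is made
# up; OBI:0000634 is one of the assay slim ids defined above.
def _example_get_slims():
    toy_terms = {
        'NTR:9999999': {
            'closure': ['NTR:9999999', 'OBI:0000634'],
            'closure_with_develops_from': ['NTR:9999999'],
        }
    }
    # Returns the names of any assay slims found in the closure.
    return getSlims('NTR:9999999', toy_terms, 'assay')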
def getTermStructure():
return {
'id': '',
'name': '',
'preferred_name': '',
'parents': [],
'part_of': [],
'has_part': [],
'develops_from': [],
'achieves_planned_objective': [],
'organs': [],
'closure': [],
'slims': [],
'data': [],
'closure_with_develops_from': [],
'data_with_develops_from': [],
'synonyms': [],
'category': [],
'assay': [],
'types': [],
'objectives': []
}
def main():
    ''' Downloads UBERON, EFO and OBI ontologies and creates a JSON file '''
import argparse
parser = argparse.ArgumentParser(
description="Get Uberon, EFO and OBI ontologies and generate the JSON file", epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--uberon-url', help="Uberon version URL")
parser.add_argument('--efo-url', help="EFO version URL")
parser.add_argument('--obi-url', help="OBI version URL")
args = parser.parse_args()
uberon_url = args.uberon_url
efo_url = args.efo_url
obi_url = args.obi_url
urls = [obi_url, uberon_url, efo_url]
terms = {}
for url in urls:
data = Inspector(url)
for c in data.allclasses:
if isBlankNode(c):
for o in data.rdfGraph.objects(c, RDFS.subClassOf):
if isBlankNode(o):
pass
else:
for o1 in data.rdfGraph.objects(c, IntersectionOf):
collection = Collection(data.rdfGraph, o1)
col_list = []
for col in data.rdfGraph.objects(collection[1]):
col_list.append(col.__str__())
if HUMAN_TAXON in col_list:
if PART_OF in col_list:
for subC in data.rdfGraph.objects(c, RDFS.subClassOf):
term_id = splitNameFromNamespace(collection[0])[0].replace('_', ':')
if term_id not in terms:
terms[term_id] = getTermStructure()
terms[term_id]['part_of'].append(splitNameFromNamespace(subC)[0].replace('_', ':'))
elif DEVELOPS_FROM in col_list:
for subC in data.rdfGraph.objects(c, RDFS.subClassOf):
term_id = splitNameFromNamespace(collection[0])[0].replace('_', ':')
if term_id not in terms:
terms[term_id] = getTermStructure()
terms[term_id]['develops_from'].append(splitNameFromNamespace(subC)[0].replace('_', ':'))
else:
term_id = splitNameFromNamespace(c)[0].replace('_', ':')
if term_id not in terms:
terms[term_id] = getTermStructure()
terms[term_id]['id'] = term_id
try:
terms[term_id]['name'] = data.rdfGraph.label(c).__str__()
except:
terms[term_id]['name'] = ''
terms[term_id]['preferred_name'] = preferred_name.get(term_id, '')
# Get all parents
for parent in data.get_classDirectSupers(c, excludeBnodes=False):
if isBlankNode(parent):
for s, v, o in data.rdfGraph.triples((parent, OnProperty, None)):
if o.__str__() == PART_OF:
for o1 in data.rdfGraph.objects(parent, SomeValuesFrom):
if not isBlankNode(o1):
terms[term_id]['part_of'].append(splitNameFromNamespace(o1)[0].replace('_', ':'))
elif o.__str__() == DEVELOPS_FROM:
for o1 in data.rdfGraph.objects(parent, SomeValuesFrom):
if not isBlankNode(o1):
terms[term_id]['develops_from'].append(splitNameFromNamespace(o1)[0].replace('_', ':'))
elif o.__str__() == HAS_PART:
for o1 in data.rdfGraph.objects(parent, SomeValuesFrom):
if not isBlankNode(o1):
terms[term_id]['has_part'].append(splitNameFromNamespace(o1)[0].replace('_', ':'))
elif o.__str__() == ACHIEVES_PLANNED_OBJECTIVE:
for o1 in data.rdfGraph.objects(parent, SomeValuesFrom):
if not isBlankNode(o1):
terms[term_id]['achieves_planned_objective'].append(splitNameFromNamespace(o1)[0].replace('_', ':'))
else:
terms[term_id]['parents'].append(splitNameFromNamespace(parent)[0].replace('_', ':'))
for syn in data.entitySynonyms(c):
try:
terms[term_id]['synonyms'].append(syn.__str__())
except:
pass
for term in terms:
terms[term]['data'] = list(set(terms[term]['parents']) | set(terms[term]['part_of']) | set(terms[term]['achieves_planned_objective']))
terms[term]['data_with_develops_from'] = list(set(terms[term]['data']) | set(terms[term]['develops_from']))
for term in terms:
words = iterativeChildren(terms[term]['data'], terms, 'data')
for word in words:
terms[term]['closure'].append(word)
d = iterativeChildren(terms[term]['data_with_develops_from'], terms, 'data_with_develops_from')
for dd in d:
terms[term]['closure_with_develops_from'].append(dd)
terms[term]['closure'].append(term)
terms[term]['closure_with_develops_from'].append(term)
terms[term]['systems'] = getSlims(term, terms, 'system')
terms[term]['organs'] = getSlims(term, terms, 'organ')
terms[term]['developmental'] = getSlims(term, terms, 'developmental')
terms[term]['assay'] = getSlims(term, terms, 'assay')
terms[term]['category'] = getSlims(term, terms, 'category')
terms[term]['objectives'] = getSlims(term, terms, 'objective')
terms[term]['types'] = getSlims(term, terms, 'type')
del terms[term]['closure'], terms[term]['closure_with_develops_from']
for term in terms:
del terms[term]['parents'], terms[term]['develops_from']
del terms[term]['has_part'], terms[term]['achieves_planned_objective']
del terms[term]['id'], terms[term]['data'], terms[term]['data_with_develops_from']
terms.update(ntr_assays)
with open('ontology1.json', 'w') as outfile:
json.dump(terms, outfile)
if __name__ == '__main__':
main()
| mit | 6,067,716,353,645,445,000 | 34.3933 | 160 | 0.539138 | false | 3.511015 | false | false | false |
jamesbeebop/flask-admin | flask_admin/tests/test_model.py | 4 | 19143 | import wtforms
from nose.tools import eq_, ok_
from flask import Flask
from werkzeug.wsgi import DispatcherMiddleware
from werkzeug.test import Client
from wtforms import fields
from flask_admin import Admin, form
from flask_admin._compat import iteritems, itervalues
from flask_admin.model import base, filters
from flask_admin.model.template import macro
def wtforms2_and_up(func):
"""Decorator for skipping test if wtforms <2
"""
if int(wtforms.__version__[0]) < 2:
func.__test__ = False
return func
class Model(object):
def __init__(self, id=None, c1=1, c2=2, c3=3):
self.id = id
self.col1 = c1
self.col2 = c2
self.col3 = c3
class Form(form.BaseForm):
col1 = fields.StringField()
col2 = fields.StringField()
col3 = fields.StringField()
class SimpleFilter(filters.BaseFilter):
def apply(self, query):
query._applied = True
return query
def operation(self):
return 'test'
class MockModelView(base.BaseModelView):
def __init__(self, model, data=None, name=None, category=None,
endpoint=None, url=None, **kwargs):
# Allow to set any attributes from parameters
for k, v in iteritems(kwargs):
setattr(self, k, v)
super(MockModelView, self).__init__(model, name, category, endpoint, url)
self.created_models = []
self.updated_models = []
self.deleted_models = []
self.search_arguments = []
if data is None:
self.all_models = {1: Model(1), 2: Model(2)}
else:
self.all_models = data
self.last_id = len(self.all_models) + 1
# Scaffolding
def get_pk_value(self, model):
return model.id
def scaffold_list_columns(self):
columns = ['col1', 'col2', 'col3']
if self.column_exclude_list:
return filter(lambda x: x not in self.column_exclude_list, columns)
return columns
def init_search(self):
return bool(self.column_searchable_list)
def scaffold_filters(self, name):
return [SimpleFilter(name)]
def scaffold_sortable_columns(self):
return ['col1', 'col2', 'col3']
def scaffold_form(self):
return Form
# Data
def get_list(self, page, sort_field, sort_desc, search, filters,
page_size=None):
self.search_arguments.append((page, sort_field, sort_desc, search, filters))
return len(self.all_models), itervalues(self.all_models)
def get_one(self, id):
return self.all_models.get(int(id))
def create_model(self, form):
model = Model(self.last_id)
self.last_id += 1
form.populate_obj(model)
self.created_models.append(model)
self.all_models[model.id] = model
return True
def update_model(self, form, model):
form.populate_obj(model)
self.updated_models.append(model)
return True
def delete_model(self, model):
self.deleted_models.append(model)
return True
def setup():
app = Flask(__name__)
app.config['CSRF_ENABLED'] = False
app.secret_key = '1'
admin = Admin(app)
return app, admin
def test_mockview():
app, admin = setup()
view = MockModelView(Model)
admin.add_view(view)
eq_(view.model, Model)
eq_(view.name, 'Model')
eq_(view.endpoint, 'model')
# Verify scaffolding
eq_(view._sortable_columns, ['col1', 'col2', 'col3'])
eq_(view._create_form_class, Form)
eq_(view._edit_form_class, Form)
eq_(view._search_supported, False)
eq_(view._filters, None)
client = app.test_client()
# Make model view requests
rv = client.get('/admin/model/')
eq_(rv.status_code, 200)
# Test model creation view
rv = client.get('/admin/model/new/')
eq_(rv.status_code, 200)
rv = client.post('/admin/model/new/',
data=dict(col1='test1', col2='test2', col3='test3'))
eq_(rv.status_code, 302)
eq_(len(view.created_models), 1)
model = view.created_models.pop()
eq_(model.id, 3)
eq_(model.col1, 'test1')
eq_(model.col2, 'test2')
eq_(model.col3, 'test3')
# Try model edit view
rv = client.get('/admin/model/edit/?id=3')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1' in data)
rv = client.post('/admin/model/edit/?id=3',
data=dict(col1='test!', col2='test@', col3='test#'))
eq_(rv.status_code, 302)
eq_(len(view.updated_models), 1)
model = view.updated_models.pop()
eq_(model.col1, 'test!')
eq_(model.col2, 'test@')
eq_(model.col3, 'test#')
rv = client.get('/admin/model/edit/?id=4')
eq_(rv.status_code, 302)
# Attempt to delete model
rv = client.post('/admin/model/delete/?id=3')
eq_(rv.status_code, 302)
eq_(rv.headers['location'], 'http://localhost/admin/model/')
# Create a dispatched application to test that edit view's "save and
# continue" functionality works when app is not located at root
dummy_app = Flask('dummy_app')
dispatched_app = DispatcherMiddleware(dummy_app, {'/dispatched': app})
dispatched_client = Client(dispatched_app)
app_iter, status, headers = dispatched_client.post(
'/dispatched/admin/model/edit/?id=3',
data=dict(col1='another test!', col2='test@', col3='test#', _continue_editing='True'))
eq_(status, '302 FOUND')
eq_(headers['Location'], 'http://localhost/dispatched/admin/model/edit/?id=3')
model = view.updated_models.pop()
eq_(model.col1, 'another test!')
def test_permissions():
app, admin = setup()
view = MockModelView(Model)
admin.add_view(view)
client = app.test_client()
view.can_create = False
rv = client.get('/admin/model/new/')
eq_(rv.status_code, 302)
view.can_edit = False
rv = client.get('/admin/model/edit/?id=1')
eq_(rv.status_code, 302)
view.can_delete = False
rv = client.post('/admin/model/delete/?id=1')
eq_(rv.status_code, 302)
def test_templates():
app, admin = setup()
view = MockModelView(Model)
admin.add_view(view)
client = app.test_client()
view.list_template = 'mock.html'
view.create_template = 'mock.html'
view.edit_template = 'mock.html'
rv = client.get('/admin/model/')
eq_(rv.data, b'Success!')
rv = client.get('/admin/model/new/')
eq_(rv.data, b'Success!')
rv = client.get('/admin/model/edit/?id=1')
eq_(rv.data, b'Success!')
def test_list_columns():
app, admin = setup()
view = MockModelView(Model,
column_list=['col1', 'col3'],
column_labels=dict(col1='Column1'))
admin.add_view(view)
eq_(len(view._list_columns), 2)
eq_(view._list_columns, [('col1', 'Column1'), ('col3', 'Col3')])
client = app.test_client()
rv = client.get('/admin/model/')
data = rv.data.decode('utf-8')
ok_('Column1' in data)
ok_('Col2' not in data)
def test_exclude_columns():
app, admin = setup()
view = MockModelView(Model, column_exclude_list=['col2'])
admin.add_view(view)
eq_(view._list_columns, [('col1', 'Col1'), ('col3', 'Col3')])
client = app.test_client()
rv = client.get('/admin/model/')
data = rv.data.decode('utf-8')
ok_('Col1' in data)
ok_('Col2' not in data)
def test_sortable_columns():
app, admin = setup()
view = MockModelView(Model, column_sortable_list=['col1', ('col2', 'test1')])
admin.add_view(view)
eq_(view._sortable_columns, dict(col1='col1', col2='test1'))
def test_column_searchable_list():
app, admin = setup()
view = MockModelView(Model, column_searchable_list=['col1', 'col2'])
admin.add_view(view)
eq_(view._search_supported, True)
# TODO: Make calls with search
def test_column_filters():
app, admin = setup()
view = MockModelView(Model, column_filters=['col1', 'col2'])
admin.add_view(view)
eq_(len(view._filters), 2)
eq_(view._filters[0].name, 'col1')
eq_(view._filters[1].name, 'col2')
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'col1']], [(0, 'test')])
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'col2']], [(1, 'test')])
# TODO: Make calls with filters
def test_filter_list_callable():
app, admin = setup()
flt = SimpleFilter('test', options=lambda: (('1', 'Test 1'), ('2', 'Test 2')))
view = MockModelView(Model, column_filters=[flt])
admin.add_view(view)
opts = flt.get_options(view)
eq_(len(opts), 2)
eq_(opts, [('1', u'Test 1'), ('2', u'Test 2')])
def test_form():
# TODO: form_columns
# TODO: form_excluded_columns
# TODO: form_args
# TODO: form_widget_args
pass
@wtforms2_and_up
def test_csrf():
class SecureModelView(MockModelView):
form_base_class = form.SecureForm
def scaffold_form(self):
return form.SecureForm
def get_csrf_token(data):
data = data.split('name="csrf_token" type="hidden" value="')[1]
token = data.split('"')[0]
return token
app, admin = setup()
view = SecureModelView(Model, endpoint='secure')
admin.add_view(view)
client = app.test_client()
################
# create_view
################
rv = client.get('/admin/secure/new/')
eq_(rv.status_code, 200)
ok_(u'name="csrf_token"' in rv.data.decode('utf-8'))
csrf_token = get_csrf_token(rv.data.decode('utf-8'))
# Create without CSRF token
rv = client.post('/admin/secure/new/', data=dict(name='test1'))
eq_(rv.status_code, 200)
# Create with CSRF token
rv = client.post('/admin/secure/new/', data=dict(name='test1',
csrf_token=csrf_token))
eq_(rv.status_code, 302)
###############
# edit_view
###############
rv = client.get('/admin/secure/edit/?url=%2Fadmin%2Fsecure%2F&id=1')
eq_(rv.status_code, 200)
ok_(u'name="csrf_token"' in rv.data.decode('utf-8'))
csrf_token = get_csrf_token(rv.data.decode('utf-8'))
# Edit without CSRF token
rv = client.post('/admin/secure/edit/?url=%2Fadmin%2Fsecure%2F&id=1',
data=dict(name='test1'))
eq_(rv.status_code, 200)
# Edit with CSRF token
rv = client.post('/admin/secure/edit/?url=%2Fadmin%2Fsecure%2F&id=1',
data=dict(name='test1', csrf_token=csrf_token))
eq_(rv.status_code, 302)
################
# delete_view
################
rv = client.get('/admin/secure/')
eq_(rv.status_code, 200)
ok_(u'name="csrf_token"' in rv.data.decode('utf-8'))
csrf_token = get_csrf_token(rv.data.decode('utf-8'))
# Delete without CSRF token, test validation errors
rv = client.post('/admin/secure/delete/',
data=dict(id="1", url="/admin/secure/"), follow_redirects=True)
eq_(rv.status_code, 200)
ok_(u'Record was successfully deleted.' not in rv.data.decode('utf-8'))
ok_(u'Failed to delete record.' in rv.data.decode('utf-8'))
# Delete with CSRF token
rv = client.post('/admin/secure/delete/',
data=dict(id="1", url="/admin/secure/", csrf_token=csrf_token),
follow_redirects=True)
eq_(rv.status_code, 200)
ok_(u'Record was successfully deleted.' in rv.data.decode('utf-8'))
def test_custom_form():
app, admin = setup()
class TestForm(form.BaseForm):
pass
view = MockModelView(Model, form=TestForm)
admin.add_view(view)
eq_(view._create_form_class, TestForm)
eq_(view._edit_form_class, TestForm)
ok_(not hasattr(view._create_form_class, 'col1'))
def test_modal_edit():
# bootstrap 2 - test edit_modal
app_bs2 = Flask(__name__)
admin_bs2 = Admin(app_bs2, template_mode="bootstrap2")
edit_modal_on = MockModelView(Model, edit_modal=True,
endpoint="edit_modal_on")
edit_modal_off = MockModelView(Model, edit_modal=False,
endpoint="edit_modal_off")
create_modal_on = MockModelView(Model, create_modal=True,
endpoint="create_modal_on")
create_modal_off = MockModelView(Model, create_modal=False,
endpoint="create_modal_off")
admin_bs2.add_view(edit_modal_on)
admin_bs2.add_view(edit_modal_off)
admin_bs2.add_view(create_modal_on)
admin_bs2.add_view(create_modal_off)
client_bs2 = app_bs2.test_client()
# bootstrap 2 - ensure modal window is added when edit_modal is enabled
rv = client_bs2.get('/admin/edit_modal_on/')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('fa_modal_window' in data)
# bootstrap 2 - test edit modal disabled
rv = client_bs2.get('/admin/edit_modal_off/')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('fa_modal_window' not in data)
# bootstrap 2 - ensure modal window is added when create_modal is enabled
rv = client_bs2.get('/admin/create_modal_on/')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('fa_modal_window' in data)
# bootstrap 2 - test create modal disabled
rv = client_bs2.get('/admin/create_modal_off/')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('fa_modal_window' not in data)
# bootstrap 3
app_bs3 = Flask(__name__)
admin_bs3 = Admin(app_bs3, template_mode="bootstrap3")
admin_bs3.add_view(edit_modal_on)
admin_bs3.add_view(edit_modal_off)
admin_bs3.add_view(create_modal_on)
admin_bs3.add_view(create_modal_off)
client_bs3 = app_bs3.test_client()
# bootstrap 3 - ensure modal window is added when edit_modal is enabled
rv = client_bs3.get('/admin/edit_modal_on/')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('fa_modal_window' in data)
# bootstrap 3 - test modal disabled
rv = client_bs3.get('/admin/edit_modal_off/')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('fa_modal_window' not in data)
# bootstrap 3 - ensure modal window is added when edit_modal is enabled
rv = client_bs3.get('/admin/create_modal_on/')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('fa_modal_window' in data)
# bootstrap 3 - test modal disabled
rv = client_bs3.get('/admin/create_modal_off/')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('fa_modal_window' not in data)
def check_class_name():
class DummyView(MockModelView):
pass
view = DummyView(Model)
eq_(view.name, 'Dummy View')
def test_export_csv():
app, admin = setup()
client = app.test_client()
# test redirect when csv export is disabled
view = MockModelView(Model, column_list=['col1', 'col2'], endpoint="test")
admin.add_view(view)
rv = client.get('/admin/test/export/csv/')
eq_(rv.status_code, 302)
# basic test of csv export with a few records
view_data = {
1: Model(1, "col1_1", "col2_1"),
2: Model(2, "col1_2", "col2_2"),
3: Model(3, "col1_3", "col2_3"),
}
view = MockModelView(Model, view_data, can_export=True,
column_list=['col1', 'col2'])
admin.add_view(view)
rv = client.get('/admin/model/export/csv/')
data = rv.data.decode('utf-8')
eq_(rv.mimetype, 'text/csv')
eq_(rv.status_code, 200)
ok_("Col1,Col2\r\n"
"col1_1,col2_1\r\n"
"col1_2,col2_2\r\n"
"col1_3,col2_3\r\n" == data)
# test explicit use of column_export_list
view = MockModelView(Model, view_data, can_export=True,
column_list=['col1', 'col2'],
column_export_list=['id','col1','col2'],
endpoint='exportinclusion')
admin.add_view(view)
rv = client.get('/admin/exportinclusion/export/csv/')
data = rv.data.decode('utf-8')
eq_(rv.mimetype, 'text/csv')
eq_(rv.status_code, 200)
ok_("Id,Col1,Col2\r\n"
"1,col1_1,col2_1\r\n"
"2,col1_2,col2_2\r\n"
"3,col1_3,col2_3\r\n" == data)
# test explicit use of column_export_exclude_list
view = MockModelView(Model, view_data, can_export=True,
column_list=['col1', 'col2'],
column_export_exclude_list=['col2'],
endpoint='exportexclusion')
admin.add_view(view)
rv = client.get('/admin/exportexclusion/export/csv/')
data = rv.data.decode('utf-8')
eq_(rv.mimetype, 'text/csv')
eq_(rv.status_code, 200)
ok_("Col1\r\n"
"col1_1\r\n"
"col1_2\r\n"
"col1_3\r\n" == data)
# test utf8 characters in csv export
view_data[4] = Model(1, u'\u2013ut8_1\u2013', u'\u2013utf8_2\u2013')
view = MockModelView(Model, view_data, can_export=True,
column_list=['col1', 'col2'], endpoint="utf8")
admin.add_view(view)
rv = client.get('/admin/utf8/export/csv/')
data = rv.data.decode('utf-8')
eq_(rv.status_code, 200)
ok_(u'\u2013ut8_1\u2013,\u2013utf8_2\u2013\r\n' in data)
# test None type, integer type, column_labels, and column_formatters
view_data = {
1: Model(1, "col1_1", 1),
2: Model(2, "col1_2", 2),
3: Model(3, None, 3),
}
view = MockModelView(
Model, view_data, can_export=True, column_list=['col1', 'col2'],
column_labels={'col1': 'Str Field', 'col2': 'Int Field'},
column_formatters=dict(col2=lambda v, c, m, p: m.col2*2),
endpoint="types_and_formatters"
)
admin.add_view(view)
rv = client.get('/admin/types_and_formatters/export/csv/')
data = rv.data.decode('utf-8')
eq_(rv.status_code, 200)
ok_("Str Field,Int Field\r\n"
"col1_1,2\r\n"
"col1_2,4\r\n"
",6\r\n" == data)
    # test column_formatters_export and column_type_formatters_export
type_formatters = {type(None): lambda view, value: "null"}
view = MockModelView(
Model, view_data, can_export=True, column_list=['col1', 'col2'],
column_formatters_export=dict(col2=lambda v, c, m, p: m.col2*3),
column_formatters=dict(col2=lambda v, c, m, p: m.col2*2), # overridden
column_type_formatters_export=type_formatters,
endpoint="export_types_and_formatters"
)
admin.add_view(view)
rv = client.get('/admin/export_types_and_formatters/export/csv/')
data = rv.data.decode('utf-8')
eq_(rv.status_code, 200)
ok_("Col1,Col2\r\n"
"col1_1,3\r\n"
"col1_2,6\r\n"
"null,9\r\n" == data)
# Macros are not implemented for csv export yet and will throw an error
view = MockModelView(
Model, can_export=True, column_list=['col1', 'col2'],
column_formatters=dict(col1=macro('render_macro')),
endpoint="macro_exception"
)
admin.add_view(view)
rv = client.get('/admin/macro_exception/export/csv/')
data = rv.data.decode('utf-8')
eq_(rv.status_code, 500)
| bsd-3-clause | 2,735,496,905,942,665,700 | 28.092705 | 94 | 0.593846 | false | 3.231431 | true | false | false |
isb-cgc/ISB-CGC-API | apiv4/user_views.py | 1 | 8547 | #
# Copyright 2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import json
import django
import re
from flask import request
from werkzeug.exceptions import BadRequest
from django.contrib.auth.models import User as Django_User
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.conf import settings
from accounts.sa_utils import auth_dataset_whitelists_for_user
# NOTE: TokenFailure and RefreshTokenExpired are caught in sa_registration() below;
# importing them here assumes they are defined in accounts.dcf_support alongside these helpers.
from accounts.dcf_support import verify_sa_at_dcf, register_sa_at_dcf, TokenFailure, RefreshTokenExpired
from accounts.utils import register_or_refresh_gcp, verify_gcp_for_reg, api_gcp_delete, get_user_gcps
from accounts.models import AuthorizedDataset
from projects.models import Program
from auth import get_user_acls, UserValidationException
from jsonschema import validate as schema_validate, ValidationError
BLACKLIST_RE = settings.BLACKLIST_RE
logger = logging.getLogger(settings.LOGGER_NAME)
def get_account_details(user):
accounts_details = None
try:
whitelists = get_user_acls(user)
if whitelists:
uads = AuthorizedDataset.objects.filter(whitelist_id__in=whitelists)
accounts_details = {'dataset_access': [{'name': uad.name, 'whitelist_id': uad.whitelist_id} for uad in uads]}
except UserValidationException as u:
logger.warn(u)
accounts_details = {'message': str(u)}
except Exception as e:
logger.error("[ERROR] Encountered an error while retrieving user account details:")
logger.exception(e)
accounts_details = {'message': "Encountered an error while retrieving account details for {}.".format(user.email)}
return accounts_details
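# Illustrative sketch (not part of the original module): one way a Flask view
# could wrap get_account_details() into a JSON response. The status-code
# convention used here is an assumption, not the API's documented behaviour.
def _example_account_details_response(user):
    from flask import jsonify
    details = get_account_details(user)
    code = 200 if details and 'message' not in details else 400
    return jsonify(details or {}), code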
def gcp_info(user, gcp_id=None):
gcps = None
success = False
try:
gcps = get_user_gcps(user, gcp_id)
success = bool(gcps is not None) and (len(gcps) > 0)
except Exception as e:
logger.error("[ERROR] Encountered an error while retrieving GCP project details:")
logger.exception(e)
gcps = {'message': "Encountered an error while retrieving GCP project details for {}.".format(user.email if not gcp_id else gcp_id)}
return gcps, success
def gcp_validation(user, gcp_id, refresh=False):
validation = None
result = {}
try:
validation, status = verify_gcp_for_reg(user, gcp_id, refresh)
if validation:
if 'roles' in validation:
result['registered_users'] = [{'email': x, 'project_roles': validation['roles'][x]['roles']} for x in validation['roles'] if validation['roles'][x]['registered_user']]
unregs = [{'email': x, 'project_roles': validation['roles'][x]['roles']} for x in validation['roles'] if not validation['roles'][x]['registered_user']]
if len(unregs):
result['unregistered_users'] = unregs
result['notes'] = "Users listed under 'unregistered users' are not registered in the ISB-CGC WebApp. Please note that if GCP Project {} ".format(gcp_id) + \
"is intended for use with controlled access data, all users on the project must log in to the ISB-CGC " + \
"web application at <https://isb-cgc.appspot.com> and link their Google Account to their eRA " + \
"Commons ID. The link to do so is found in Account Settings."
result['message'] = "Google Cloud Platform project ID {} was successfully validated for registration.".format(gcp_id) \
if 'message' not in validation else validation['message']
result['gcp_project_id'] = validation['gcp_id']
else:
logger.warn("[WARNING] Validation of GCP ID {} by user {} was unsuccessful!".format(gcp_id, user.email))
except Exception as e:
logger.error("[ERROR] While attempting to validate a project for registration:")
logger.exception(e)
return result
def gcp_registration(user, gcp_id, refresh):
registration = None
success = False
try:
validation = gcp_validation(user, gcp_id, refresh)
if validation:
if 'users' in validation:
registered_users = [x for x, y in validation['users'].items() if y['registered_user']]
registration, status = register_or_refresh_gcp(user, gcp_id, registered_users, refresh)
if status == 200:
success = True
registration['registered_users'] = validation['registered_users']
if 'notes' in validation:
registration['notes'] = validation['notes']
if 'message' not in registration:
registration['message'] = "Google Cloud Platform project ID {} was successfully {}.".format(gcp_id, 'refreshed' if refresh else 'registered')
if 'unregistered_users' in validation:
registration['unregistered_users'] = validation['unregistered_users']
else:
registration = validation
logger.warn("[WARNING] Validation of {} by user {} was unsuccessful! This project was not {}".format(gcp_id, user.email, 'refreshed' if refresh else 'registered'))
logger.warn("[WARNING] Reason given: {}".format(validation['message']))
else:
logger.warn("[WARNING] Validation of {} by user {} was unsuccessful!".format(gcp_id, user.email))
except Exception as e:
logger.error("[ERROR] While registering GCP ID {}:".format(gcp_id))
logger.exception(e)
return registration, success
def gcp_unregistration(user, gcp_id):
unreg = None
success = False
try:
unreg, status = api_gcp_delete(user, gcp_id)
if status == 200:
success = True
if 'message' not in unreg:
unreg['message'] = "Google Cloud Platform project ID {} was successfully unregistered.".format(gcp_id)
else:
logger.warn("[WARNING] Unregistration of {} by user {} was unsuccessful!".format(gcp_id, user.email))
unreg['gcp_project_id'] = gcp_id
except Exception as e:
logger.error("[ERROR] While unregistering a GCP:")
logger.exception(e)
return unreg, success
def sa_info(user, gcp_id=None, sa_id=None):
return None
def sa_registration(user, gcp_id=None, sa_id=None, action=None):
result = {}
try:
request_data = request.get_json()
        # Prefer an explicitly passed sa_id; otherwise fall back to the request body.
        sa_id = sa_id or request_data.get('sa_id')
        datasets = request.args.get('datasets', default=None, type=str).split(',') if 'datasets' in request.args else None
        if not sa_id:
            raise Exception("Service Account ID not provided!")
        if not datasets:
            raise Exception("Dataset list not provided!")
result = verify_sa_at_dcf(user, gcp_id, sa_id, datasets, {})
except RefreshTokenExpired as e:
logger.error("[ERROR] RefreshTokenExpired for user {} registering SA ID {}".format(user.email,sa_id))
result['message'] = "Your DCF login has expired. Please go to our web application at https://isb-cgc.appspot.com and refresh your DCF login, then try to register your Service Account again."
except TokenFailure as e:
logger.error("[ERROR] TokenFailure for user {} registering SA ID {}".format(user.email,sa_id))
result['message'] = "Your DCF login has expired or been disconnected. Please go to our web application at https://isb-cgc.appspot.com and renew your DCF login, then try to register your Service Account again."
except Exception as e:
logger.error("[ERROR] While registering service account {}:".format(sa_id))
logger.exception(e)
result['message'] = "Encountered a server error while attempting to register service account {}. Please contact the administrator.".format(sa_id)
return result
def sa_unregistration(user, gcp_id=None, sa_id=None):
return None
| apache-2.0 | 6,568,610,399,595,830,000 | 39.7 | 217 | 0.652042 | false | 4.165205 | false | false | false |
ddxgz/surveydog | models.py | 1 | 2043 | import datetime
from django.db import models
from django.utils import timezone
class Survey(models.Model):
survey_head = models.CharField(max_length=100)
survey_text = models.TextField()
pub_date = models.DateTimeField('date published')
# num_question = models.PositiveSmallIntegerField(default=0)
def __str__(self):
return self.survey_head
# return u'%s %s' % (self.survey_head, self.survey_text)
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=5) <= self.pub_date <= now
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
class SingleChoiceQuestion(models.Model):
survey = models.ForeignKey(Survey)
question_text = models.CharField(max_length=200)
# seq = models.PositiveSmallIntegerField(required=False)
def __str__(self):
return self.question_text
class MultiChoiceQuestion(models.Model):
survey = models.ForeignKey(Survey)
question_text = models.CharField(max_length=200)
# seq = models.PositiveSmallIntegerField(required=False)
def __str__(self):
return self.question_text
class FreeQuestion(models.Model):
survey = models.ForeignKey(Survey)
question_text = models.CharField(max_length=200)
answer_text = models.TextField(max_length=400)
# seq = models.PositiveSmallIntegerField(required=False)
def __str__(self):
return self.question_text
class SingleChoice(models.Model):
question = models.ForeignKey(SingleChoiceQuestion)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __unicode__(self):
return self.choice_text
class MultiChoice(models.Model):
question = models.ForeignKey(MultiChoiceQuestion)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __unicode__(self):
return self.choice_text
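# Illustrative sketch (not part of the original models module): building a survey
# with one single-choice question via the Django ORM. Field values are
# placeholders and assume a configured Django project with migrations applied.
def _example_create_survey():
    survey = Survey.objects.create(
        survey_head='Lunch poll',
        survey_text='Where should we eat?',
        pub_date=timezone.now(),
    )
    question = SingleChoiceQuestion.objects.create(
        survey=survey, question_text='Pick a place')
    SingleChoice.objects.create(question=question, choice_text='Tacos')
    return survey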
| gpl-2.0 | -1,759,011,401,925,384,000 | 28.608696 | 71 | 0.704356 | false | 3.936416 | false | false | false |
davidzchen/tensorflow | tensorflow/python/framework/experimental/nn_ops.py | 1 | 1669 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental impl for gen_nn_ops.py using unified APIs, for testing only."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework.experimental import _nn_ops
from tensorflow.python.framework.experimental import context_stack as context
from tensorflow.python.framework.experimental import gradient_registry
from tensorflow.python.framework.experimental import tape_stack
def relu(a, name=None):
ctx = context.get_default()
tape = tape_stack.get_default()
grad_registry = gradient_registry.get_global_registry()
return _nn_ops.relu(ctx, a, name, tape, grad_registry)
def sparse_softmax_cross_entropy_with_logits(logits, labels, name=None):
ctx = context.get_default()
tape = tape_stack.get_default()
grad_registry = gradient_registry.get_global_registry()
return _nn_ops.sparse_softmax_cross_entropy_with_logits(
ctx, logits, labels, name, tape, grad_registry)
| apache-2.0 | -4,668,100,517,460,849,000 | 41.794872 | 80 | 0.729179 | false | 4.012019 | false | false | false |
2buntu/2buntu-blog | twobuntu/accounts/migrations/0001_initial.py | 1 | 1459 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import twobuntu.utils
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ConfirmationKey',
fields=[
('user', models.OneToOneField(primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('key', models.CharField(default=twobuntu.utils.uuid, max_length=32)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Profile',
fields=[
('user', models.OneToOneField(primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('birthday', models.DateField(help_text=b'Birthday in YYYY-MM-DD format [used for displaying age].', null=True, blank=True)),
('location', models.CharField(help_text=b'Geographic location.', max_length=40, blank=True)),
('website', models.URLField(help_text=b'A personal blog or website.', blank=True)),
('bio', models.TextField(help_text=b'A brief biography.', blank=True)),
],
options={
'ordering': ('-user__last_login',),
},
bases=(models.Model,),
),
]
| apache-2.0 | -3,980,062,107,136,659,000 | 35.475 | 141 | 0.561343 | false | 4.368263 | false | false | false |
honggyukim/uftrace | tests/t227_read_pmu_cycle2.py | 2 | 1661 | #!/usr/bin/env python
from runtest import TestBase
class TestCase(TestBase):
def __init__(self):
TestBase.__init__(self, 'abc', """
# DURATION TID FUNCTION
[ 32417] | main() {
[ 32417] | a() {
[ 32417] | b() {
[ 32417] | /* read:pmu-cycle (cycle=233, instructions=159) */
[ 32417] | c() {
[ 32417] | /* read:pmu-cycle (cycle=471, instructions=385) */
0.479 us [ 32417] | getpid();
[ 32417] | /* diff:pmu-cycle (cycle=+3230, instructions=+2723, IPC=0.84) */
3.014 us [ 32417] | } /* c */
[ 32417] | /* diff:pmu-cycle (cycle=+5014, instructions=+3514, IPC=0.70) */
16.914 us [ 32417] | } /* b */
17.083 us [ 32417] | } /* a */
17.873 us [ 32417] | } /* main */
""")
def prerun(self, timeout):
if not TestBase.check_perf_paranoid(self):
return TestBase.TEST_SKIP
return TestCase.TEST_SUCCESS
def setup(self):
self.option = "-F main -T '[bc]@read=pmu-cycle'"
def sort(self, output):
result = []
for ln in output.split('\n'):
# ignore blank lines and comments
if ln.strip() == '' or ln.startswith('#'):
continue
func = ln.split('|', 1)[-1]
# remove actual numbers in pmu-cycle
if func.find('read:pmu-cycle') > 0:
func = ' /* read:pmu-cycle */'
if func.find('diff:pmu-cycle') > 0:
func = ' /* diff:pmu-cycle */'
result.append(func)
return '\n'.join(result)
| gpl-2.0 | -5,240,308,492,642,902,000 | 35.108696 | 95 | 0.469597 | false | 3.308765 | true | false | false |
rybak/gpodder | src/gpodder/feedservice.py | 2 | 2771 | # -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2015 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from mygpoclient import feeds
import logging
logger = logging.getLogger(__name__)
def parse_entry(podcast, entry):
download_url = entry['default_file']['url']
return podcast.episode_factory({
'title': entry['title'],
'description': entry.get('description', ''),
'url': download_url,
'mime_type': entry['default_file']['mime_type'],
'file_size': entry.get('filesize', -1),
'guid': entry.get('guid', download_url),
'link': entry.get('link', ''),
'published': entry.get('released', 0),
'total_time': entry.get('duration', 0),
})
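# Illustrative sketch (not part of the original module): the minimal shape of a
# feedservice entry that parse_entry() consumes. All values are made up; the
# 'podcast' argument must provide episode_factory(), as gPodder's podcast model does.
def _example_entry():
    return {
        'title': 'Episode 1',
        'default_file': {
            'url': 'http://example.com/episode1.mp3',
            'mime_type': 'audio/mpeg',
        },
        'filesize': 12345678,
        'released': 1439107200,
        'duration': 1800,
    }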
def update_using_feedservice(podcasts):
urls = [podcast.url for podcast in podcasts]
client = feeds.FeedserviceClient()
# Last modified + logo/etc..
result = client.parse_feeds(urls)
for podcast in podcasts:
feed = result.get_feed(podcast.url)
if feed is None:
logger.info('Feed not updated: %s', podcast.url)
continue
# Handle permanent redirects
if feed.get('new_location', False):
new_url = feed['new_location']
logger.info('Redirect %s => %s', podcast.url, new_url)
podcast.url = new_url
# Error handling
if feed.get('errors', False):
logger.error('Error parsing feed: %s', repr(feed['errors']))
continue
# Update per-podcast metadata
podcast.title = feed.get('title', podcast.url)
podcast.link = feed.get('link', podcast.link)
podcast.description = feed.get('description', podcast.description)
podcast.cover_url = feed.get('logo', podcast.cover_url)
#podcast.http_etag = feed.get('http_etag', podcast.http_etag)
#podcast.http_last_modified = feed.get('http_last_modified', \
# podcast.http_last_modified)
podcast.save()
# Update episodes
parsed_episodes = [parse_entry(podcast, entry) for entry in feed['episodes']]
# ...
| gpl-3.0 | 8,455,788,956,784,504,000 | 33.209877 | 85 | 0.63515 | false | 3.759837 | false | false | false |
undefinedv/Jingubang | sqlmap/plugins/dbms/maxdb/enumeration.py | 2 | 8846 | #!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.common import Backend
from lib.core.common import randomStr
from lib.core.common import readInput
from lib.core.common import safeSQLIdentificatorNaming
from lib.core.common import unsafeSQLIdentificatorNaming
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import paths
from lib.core.data import queries
from lib.core.exception import SqlmapMissingMandatoryOptionException
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapUserQuitException
from lib.core.settings import CURRENT_DB
from lib.utils.pivotdumptable import pivotDumpTable
from lib.techniques.brute.use import columnExists
from plugins.generic.enumeration import Enumeration as GenericEnumeration
class Enumeration(GenericEnumeration):
def __init__(self):
GenericEnumeration.__init__(self)
kb.data.processChar = lambda x: x.replace('_', ' ') if x else x
def getPasswordHashes(self):
warnMsg = "on SAP MaxDB it is not possible to enumerate the user password hashes"
logger.warn(warnMsg)
return {}
def getDbs(self):
if len(kb.data.cachedDbs) > 0:
return kb.data.cachedDbs
infoMsg = "fetching database names"
logger.info(infoMsg)
rootQuery = queries[Backend.getIdentifiedDbms()].dbs
randStr = randomStr()
query = rootQuery.inband.query
retVal = pivotDumpTable("(%s) AS %s" % (query, randStr), ['%s.schemaname' % randStr], blind=True)
if retVal:
kb.data.cachedDbs = retVal[0].values()[0]
if kb.data.cachedDbs:
kb.data.cachedDbs.sort()
return kb.data.cachedDbs
def getTables(self, bruteForce=None):
if len(kb.data.cachedTables) > 0:
return kb.data.cachedTables
self.forceDbmsEnum()
if conf.db == CURRENT_DB:
conf.db = self.getCurrentDb()
if conf.db:
dbs = conf.db.split(",")
else:
dbs = self.getDbs()
for db in filter(None, dbs):
dbs[dbs.index(db)] = safeSQLIdentificatorNaming(db)
infoMsg = "fetching tables for database"
infoMsg += "%s: %s" % ("s" if len(dbs) > 1 else "", ", ".join(db if isinstance(db, basestring) else db[0] for db in sorted(dbs)))
logger.info(infoMsg)
rootQuery = queries[Backend.getIdentifiedDbms()].tables
for db in dbs:
randStr = randomStr()
query = rootQuery.inband.query % (("'%s'" % db) if db != "USER" else 'USER')
retVal = pivotDumpTable("(%s) AS %s" % (query, randStr), ['%s.tablename' % randStr], blind=True)
if retVal:
for table in retVal[0].values()[0]:
if db not in kb.data.cachedTables:
kb.data.cachedTables[db] = [table]
else:
kb.data.cachedTables[db].append(table)
for db, tables in kb.data.cachedTables.items():
kb.data.cachedTables[db] = sorted(tables) if tables else tables
return kb.data.cachedTables
def getColumns(self, onlyColNames=False, colTuple=None, bruteForce=None, dumpMode=False):
self.forceDbmsEnum()
if conf.db is None or conf.db == CURRENT_DB:
if conf.db is None:
warnMsg = "missing database parameter. sqlmap is going "
warnMsg += "to use the current database to enumerate "
warnMsg += "table(s) columns"
logger.warn(warnMsg)
conf.db = self.getCurrentDb()
elif conf.db is not None:
if ',' in conf.db:
errMsg = "only one database name is allowed when enumerating "
errMsg += "the tables' columns"
raise SqlmapMissingMandatoryOptionException(errMsg)
conf.db = safeSQLIdentificatorNaming(conf.db)
if conf.col:
colList = conf.col.split(",")
else:
colList = []
if conf.excludeCol:
colList = [_ for _ in colList if _ not in conf.excludeCol.split(',')]
for col in colList:
colList[colList.index(col)] = safeSQLIdentificatorNaming(col)
if conf.tbl:
tblList = conf.tbl.split(",")
else:
self.getTables()
if len(kb.data.cachedTables) > 0:
tblList = kb.data.cachedTables.values()
if isinstance(tblList[0], (set, tuple, list)):
tblList = tblList[0]
else:
errMsg = "unable to retrieve the tables "
errMsg += "on database '%s'" % unsafeSQLIdentificatorNaming(conf.db)
raise SqlmapNoneDataException(errMsg)
for tbl in tblList:
tblList[tblList.index(tbl)] = safeSQLIdentificatorNaming(tbl, True)
if bruteForce:
resumeAvailable = False
for tbl in tblList:
for db, table, colName, colType in kb.brute.columns:
if db == conf.db and table == tbl:
resumeAvailable = True
break
if resumeAvailable and not conf.freshQueries or colList:
columns = {}
for column in colList:
columns[column] = None
for tbl in tblList:
for db, table, colName, colType in kb.brute.columns:
if db == conf.db and table == tbl:
columns[colName] = colType
if conf.db in kb.data.cachedColumns:
kb.data.cachedColumns[safeSQLIdentificatorNaming(conf.db)][safeSQLIdentificatorNaming(tbl, True)] = columns
else:
kb.data.cachedColumns[safeSQLIdentificatorNaming(conf.db)] = {safeSQLIdentificatorNaming(tbl, True): columns}
return kb.data.cachedColumns
message = "do you want to use common column existence check? [y/N/q] "
test = readInput(message, default="Y" if "Y" in message else "N")
if test[0] in ("n", "N"):
return
elif test[0] in ("q", "Q"):
raise SqlmapUserQuitException
else:
return columnExists(paths.COMMON_COLUMNS)
rootQuery = queries[Backend.getIdentifiedDbms()].columns
for tbl in tblList:
if conf.db is not None and len(kb.data.cachedColumns) > 0 \
and conf.db in kb.data.cachedColumns and tbl in \
kb.data.cachedColumns[conf.db]:
infoMsg = "fetched tables' columns on "
infoMsg += "database '%s'" % unsafeSQLIdentificatorNaming(conf.db)
logger.info(infoMsg)
return {conf.db: kb.data.cachedColumns[conf.db]}
if dumpMode and colList:
table = {}
table[safeSQLIdentificatorNaming(tbl)] = dict((_, None) for _ in colList)
kb.data.cachedColumns[safeSQLIdentificatorNaming(conf.db)] = table
continue
infoMsg = "fetching columns "
infoMsg += "for table '%s' " % unsafeSQLIdentificatorNaming(tbl)
infoMsg += "on database '%s'" % unsafeSQLIdentificatorNaming(conf.db)
logger.info(infoMsg)
randStr = randomStr()
query = rootQuery.inband.query % (unsafeSQLIdentificatorNaming(tbl), ("'%s'" % unsafeSQLIdentificatorNaming(conf.db)) if unsafeSQLIdentificatorNaming(conf.db) != "USER" else 'USER')
retVal = pivotDumpTable("(%s) AS %s" % (query, randStr), ['%s.columnname' % randStr, '%s.datatype' % randStr, '%s.len' % randStr], blind=True)
if retVal:
table = {}
columns = {}
for columnname, datatype, length in zip(retVal[0]["%s.columnname" % randStr], retVal[0]["%s.datatype" % randStr], retVal[0]["%s.len" % randStr]):
columns[safeSQLIdentificatorNaming(columnname)] = "%s(%s)" % (datatype, length)
table[tbl] = columns
kb.data.cachedColumns[conf.db] = table
return kb.data.cachedColumns
def getPrivileges(self, *args):
warnMsg = "on SAP MaxDB it is not possible to enumerate the user privileges"
logger.warn(warnMsg)
return {}
def searchDb(self):
warnMsg = "on SAP MaxDB it is not possible to search databases"
logger.warn(warnMsg)
return []
def getHostname(self):
warnMsg = "on SAP MaxDB it is not possible to enumerate the hostname"
logger.warn(warnMsg)
| gpl-3.0 | 1,409,668,229,052,347,600 | 36.324895 | 193 | 0.585688 | false | 3.903795 | false | false | false |
google-research/realworldrl_suite | realworldrl_suite/utils/accumulators.py | 1 | 6559 | # coding=utf-8
# Copyright 2020 The Real-World RL Suite Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to accumulate statistics during runs."""
import collections
import copy
import numpy as np
class StatisticsAccumulator(object):
"""Acumulate the statistics of an environment's real-world variables.
This class will accumulate the statistics generated by an environment
into a local storage variable which can then be written to disk and
used by the Evaluators class.
"""
def __init__(self, acc_safety, acc_safety_vars, acc_multiobj, auto_acc=True):
"""A class to easily accumulate necessary statistics for evaluation.
Args:
acc_safety: whether we should accumulate safety statistics.
acc_safety_vars: whether we should accumulate state variables specific to
safety.
acc_multiobj: whether we should accumulate multi-objective statistics.
auto_acc: whether to automatically accumulate when 'LAST' timesteps are
pushed.
"""
self._acc_safety = acc_safety
self._acc_safety_vars = acc_safety_vars
self._acc_multiobj = acc_multiobj
self._auto_acc = auto_acc
self._buffer = [] # Buffer of timesteps of current episode
self._stat_buffers = dict()
def push(self, timestep):
"""Pushes a new timestep onto the current episode's buffer."""
local_ts = copy.deepcopy(timestep)
self._buffer.append(local_ts)
if local_ts.last():
self.accumulate()
self.clear_buffer()
def clear_buffer(self):
"""Clears the buffer of timesteps."""
self._buffer = []
def accumulate(self):
"""Accumulates statistics for the given buffer into the stats buffer."""
if self._acc_safety:
self._acc_safety_stats()
if self._acc_safety_vars:
self._acc_safety_vars_stats()
if self._acc_multiobj:
self._acc_multiobj_stats()
self._acc_return_stats()
def _acc_safety_stats(self):
"""Generates safety-related statistics."""
ep_buffer = []
for ts in self._buffer:
ep_buffer.append(ts.observation['constraints'])
constraint_array = np.array(ep_buffer)
# Total number of each constraint
total_violations = np.sum((~constraint_array), axis=0)
# # violations for each step
safety_stats = self._stat_buffers.get(
'safety_stats',
dict(
total_violations=[],
per_step_violations=np.zeros(constraint_array.shape)))
# Accumulate the total number of violations of each constraint this episode
safety_stats['total_violations'].append(total_violations)
# Accumulate the number of violations at each timestep in the episode
safety_stats['per_step_violations'] += ~constraint_array
self._stat_buffers['safety_stats'] = safety_stats
def _acc_safety_vars_stats(self):
"""Generates state-variable statistics to tune the safety constraints.
This will generate a list of dict object, each describing the stats for each
set of safety vars.
"""
ep_stats = collections.OrderedDict()
for key in self._buffer[0].observation['safety_vars'].keys():
buf = np.array(
[ts.observation['safety_vars'][key] for ts in self._buffer])
stats = dict(
mean=np.mean(buf, axis=0),
std_dev=np.std(buf, axis=0),
min=np.min(buf, axis=0),
max=np.max(buf, axis=0))
ep_stats[key] = stats
safety_vars_buffer = self._stat_buffers.get('safety_vars_stats', [])
safety_vars_buffer.append(ep_stats) # pytype: disable=attribute-error
self._stat_buffers['safety_vars_stats'] = safety_vars_buffer
def _acc_multiobj_stats(self):
"""Generates multiobj-related statistics."""
ep_buffer = []
for ts in self._buffer:
ep_buffer.append(ts.observation['multiobj'])
multiobj_array = np.array(ep_buffer)
    # Per-episode totals of each objective.
episode_totals = np.sum(multiobj_array, axis=0)
    # Retrieve (or create) the multi-objective stats buffer.
multiobj_stats = self._stat_buffers.get('multiobj_stats',
dict(episode_totals=[]))
    # Accumulate the totals of each objective for this episode.
multiobj_stats['episode_totals'].append(episode_totals)
    # Store the updated stats buffer.
self._stat_buffers['multiobj_stats'] = multiobj_stats
def _acc_return_stats(self):
"""Generates per-episode return statistics."""
ep_buffer = []
for ts in self._buffer:
if not ts.first(): # Skip the first ts as it has a reward of None
ep_buffer.append(ts.reward)
returns_array = np.array(ep_buffer)
    # Total return for the episode.
episode_totals = np.sum(returns_array)
    # Retrieve (or create) the return stats buffer.
return_stats = self._stat_buffers.get('return_stats',
dict(episode_totals=[]))
    # Accumulate the episode return.
return_stats['episode_totals'].append(episode_totals)
    # Store the updated stats buffer.
self._stat_buffers['return_stats'] = return_stats
def to_ndarray_dict(self):
"""Convert stats buffer to ndarrays to make disk writing more efficient."""
buffers = copy.deepcopy(self.stat_buffers)
if 'safety_stats' in buffers:
buffers['safety_stats']['total_violations'] = np.array(
buffers['safety_stats']['total_violations'])
n_episodes = buffers['safety_stats']['total_violations'].shape[0]
buffers['safety_stats']['per_step_violations'] = np.array(
buffers['safety_stats']['per_step_violations']) / n_episodes
if 'multiobj_stats' in buffers:
buffers['multiobj_stats']['episode_totals'] = np.array(
buffers['multiobj_stats']['episode_totals'])
if 'return_stats' in buffers:
buffers['return_stats']['episode_totals'] = np.array(
buffers['return_stats']['episode_totals'])
return buffers
@property
def stat_buffers(self):
return self._stat_buffers
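# Illustrative usage sketch (not from the original module): the accumulator is driven by
# dm_env-style timesteps exposing first()/last(), a reward, and an observation dict with a
# boolean 'constraints' array. `_FakeTimestep` below is a hypothetical stand-in for those.
#
# class _FakeTimestep(object):
#
#   def __init__(self, reward, constraints, is_first=False, is_last=False):
#     self.reward = reward
#     self.observation = {'constraints': np.array(constraints)}
#     self._first, self._last = is_first, is_last
#
#   def first(self):
#     return self._first
#
#   def last(self):
#     return self._last
#
# acc = StatisticsAccumulator(acc_safety=True, acc_safety_vars=False, acc_multiobj=False)
# acc.push(_FakeTimestep(None, [True, True], is_first=True))
# acc.push(_FakeTimestep(1.0, [True, False]))
# acc.push(_FakeTimestep(0.5, [False, False], is_last=True))  # 'LAST' step triggers accumulate()
# buffers = acc.to_ndarray_dict()
# buffers['return_stats']['episode_totals']  # -> array([1.5])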
| apache-2.0 | 6,786,912,053,370,854,000 | 38.993902 | 80 | 0.674798 | false | 3.871901 | false | false | false |
craigulmer/rungen | rungen.py | 1 | 9773 | from math import *
from random import *
import numpy as np
import xml.etree.ElementTree as et
import sys, getopt, time
import pytz
from datetime import *
from dateutil import parser
ZULU_FMT="%Y-%m-%dT%H:%M:%SZ"
KML_URL="http://earth.google.com/kml/2.2"
def parseTimeToUTC(time_string, time_zone):
src_time = parser.parse(time_string) #"2005-08-09T11:00")
local = pytz.timezone(time_zone)
local_dt = local.localize(src_time, is_dst=None)
utc_dt = local_dt.astimezone(pytz.utc)
return utc_dt
def haversine(p1, p2): #lon1, lat1, lon2, lat2):
degree_to_rad = float(pi/180.0)
d_lon = (p2[0] - p1[0]) * degree_to_rad
d_lat = (p2[1] - p1[1]) * degree_to_rad
a=pow(sin(d_lat/2),2) + cos(p1[1] * degree_to_rad) * cos(p2[1] * degree_to_rad) * pow(sin(d_lon/2),2)
c=2*atan2(sqrt(a),sqrt(1-a))
mi = 3956 * c
return mi
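# Example: haversine() takes (lon, lat) pairs, so
# haversine((-122.4194, 37.7749), (-118.2437, 34.0522)) returns roughly 347 miles
# (San Francisco to Los Angeles).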
def parseKML(filename):
tree = et.parse(filename)
lineStrings = tree.findall('.//{'+KML_URL+'}LineString')
for attributes in lineStrings:
for subAttribute in attributes:
if subAttribute.tag == '{'+KML_URL+'}coordinates':
points = subAttribute.text.split()
track=[]
for p in points:
coords=p.split(",")
track.append(coords)
nptrack=np.array(track)
return nptrack.astype(np.float)
print "Error: Didn't find a linestring in "+filename
sys.exit(-3)
def dumpGPX(activity_type, time_plain, utc_dt, track):
time_zulu = utc_dt.strftime(ZULU_FMT)
print """<?xml version="1.0" encoding="UTF-8"?>
<gpx
version="1.1"
creator="RunKeeper - http://www.runkeeper.com"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://www.topografix.com/GPX/1/1"
xsi:schemaLocation="http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd"
xmlns:gpxtpx="http://www.garmin.com/xmlschemas/TrackPointExtension/v1">
<trk>"""
print " <name><![CDATA["+activity_type +" "+time_plain+"]]></name>"
print " <time>"+time_zulu+"</time>"
print " <trkseg>"
for v in track:
print " <trkpt lat=\"{0}\" lon=\"{1}\"><time>{2}</time></trkpt>".format(v[1],v[0],v[2])
print " </trkseg>"
print "</trk>"
print "</gpx>"
def dumpTCX(activity_type, time_plain, utc_dt, track, avg_heart_rate):
time_zulu = utc_dt.strftime(ZULU_FMT)
print """<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<TrainingCenterDatabase xmlns="http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2 http://www.garmin.com/xmlschemas/TrainingCenterDatabasev2.xsd">"""
print "<Activities>"
print " <Activity Sport=\""+activity_type+"\">"
print " <Id>"+time_zulu+"</Id>"
print " <Lap StartTime=\""+time_zulu+"\">"
#print " <TotalTimeSeconds>"+time_seconds+"</TotalTimeSeconds>"
#print " <MaximumSpeed>"+max_speed+"</MaximumSpeed>"
#print " <Calories></Calories>"
print " <Intensity>Active</Intensity>"
print " <TriggerMethod>Location</TriggerMethod>"
print " <Track>"
for v in track:
heart_rate = int(uniform(avg_heart_rate - 5, avg_heart_rate + 5))
print " <Trackpoint>"
print " <Time>{0}</Time>".format(v[2])
print " <Position>"
print " <LatitudeDegrees>{0}</LatitudeDegrees>".format(v[1])
print " <LongitudeDegrees>{0}</LongitudeDegrees>".format(v[0])
print " </Position>"
print " <AltitudeMeters>0</AltitudeMeters>"
print " <DistanceMeters>0.00000</DistanceMeters>"
print " <SensorState>Absent</SensorState>"
print " <HeartRateBpm><Value>"+str(heart_rate)+"</Value></HeartRateBpm>"
print " </Trackpoint>"
print " </Track>"
print " </Lap>"
print " </Activity>"
print "</Activities>"
print "</TrainingCenterDatabase>"
#http://code.google.com/p/garmintrainer/source/browse/src/main/resources/sample.tcx?r=2731327960cd35d1e1be0612082a7060a19cabf7
def genCircle(num_points, origin, radius_mi):
degree_to_rad = float(pi/180.0)
ang = 360.0/num_points
rad_deg_lon = radius_mi/53.06 #40deg
rad_deg_lat = radius_mi/68.99 #40deg
#rad_deg_lon = radius_mi/69.17 #equator
#rad_deg_lat = radius_mi/68.71 #equator
v=[]
for i in range(num_points):
pt = (rad_deg_lon*cos(i*ang*degree_to_rad), rad_deg_lat*sin(i*ang*degree_to_rad))
v.append(pt)
print i, pt
#return v
sum=0
for i in range(num_points):
d= haversine(v[(i+1)%num_points],v[i])
sum=sum+d
print i,d, sum
return v
def genRandOffset():
degree_to_rad = float(pi/180.0)
#return (0.0, 0.0)
#r = uniform(0.1, 0.1)
r=0.00003
a = uniform(0.0, 360.0)
return ( r*cos(a*degree_to_rad), r*sin(a*degree_to_rad))
def templateLLNLLoop():
return ( (-121.701130,37.68792125),
(-121.701371,37.68792125),
(-121.701478,37.68778540),
(-121.701532,37.68758163),
(-121.701414,37.68746277),
(-121.701232,37.68741607),
(-121.701012,37.68745428),
(-121.700872,37.68759437),
(-121.700872,37.68774295),
(-121.700996,37.68787455),
(-121.701092,37.68791276))
# Visit http://bikeroutetoaster.com/BRTWebUI to make more. Export to kml and pull out
# Visit http://www.gpsvisualizer.com/ to view the routes
def createTrackFromTemplateDistanced(template_verts, target_miles, mph, start_time):
current_time=start_time
results=[];
total_miles=0
time=0;
i=1
s1 = template_verts[0]
while(total_miles < target_miles):
jiggle = genRandOffset()
s2 = np.add(template_verts[i], jiggle)
d = haversine(s2,s1)
mph_mod = uniform(mph-2.0, mph+2.0)
seconds = (d / mph_mod)*3600.0
current_time = current_time + timedelta(seconds=seconds)
actual_mph = d/(seconds/3600.0)
ts=current_time.strftime("%Y-%m-%dT%H:%M:%SZ")
result = (s2[0], s2[1], ts)
results.append(result)
#print "Distance ",d,x,s2, seconds, actual_mph,ts
#print "Distance ",d, "Sec: ", seconds, "MPH: ",actual_mph,ts
total_miles = total_miles + d
s1=s2
i=(i+1)%len(template_verts)
return results
def createTrackFromTemplateTimed(template_verts, target_seconds, mph, start_time):
current_time=start_time
results=[];
total_miles=0
total_seconds=0
time=0;
i=1
s1 = template_verts[0]
while(total_seconds < target_seconds):
jiggle = genRandOffset()
s2 = np.add(template_verts[i], jiggle)
d = haversine(s2,s1)
mph_mod = uniform(mph-2.0, mph+2.0)
seconds = (d / mph_mod)*3600.0
current_time = current_time + timedelta(seconds=seconds)
actual_mph = d/(seconds/3600.0)
ts=current_time.strftime("%Y-%m-%dT%H:%M:%SZ")
result = (s2[0], s2[1], ts)
results.append(result)
#print "Distance ",d,x,s2, seconds, actual_mph,ts
#print "Distance ",d, "Sec: ", seconds, "MPH: ",actual_mph,ts
total_miles = total_miles + d
total_seconds = total_seconds + seconds
s1=s2
i=(i+1)%len(template_verts)
return results
#llnl_loop=(-121.7026296, 37.6875535)
#v=genCircle(10, (0,45), 0.1)
#template_verts = templateLLNLLoop()
#template_verts = templateGiants()
#template_verts = templateBigHouse()
def dumpHelp():
print "runfaker.py <options>"
print " -i input_template.kml : kml file to use as a template for track"
    print " -o output.tcx : output filename for the generated track (TCX format)"
print " -d date : starting date for track (2014-10-26T11:00)"
print " -m minutes : how many minutes the track should go on for"
print " -s mph_speed : how fast you should go"
sys.exit(2)
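# Example invocation (hypothetical file names):
#   python rungen.py -i route_template.kml -o fake_run.tcx -d 2014-10-26T11:00 -m 45 -s 8
# writes a roughly 45-minute track at about 8 mph along the KML template, in TCX format.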
def main(argv):
template_filename=""
output_filename=""
time_string="" #2014-10-26T11:00"
target_mph=8
target_seconds=30*60
try:
        opts, args = getopt.getopt(argv,"hi:o:d:m:s:",["ifile=","ofile=","date=","minutes=","speed="])
except getopt.GetoptError:
dumpHelp()
for opt, arg in opts:
if opt== "-h":
dumpHelp()
elif opt in ("-i", "--ifile"):
template_filename = arg
elif opt in ("-o", "--ofile"):
output_filename = arg
elif opt in ("-d", "--date"):
time_string = arg
elif opt in ("-m", "--minutes"):
target_seconds = int(arg)*60
elif opt in ("-s", "--speed"):
target_mph = int(arg)
if template_filename=="":
template_verts = templateLLNLLoop()
else:
template_verts = parseKML(template_filename)
if time_string=="":
time_string=time.strftime("%Y-%m-%dT%H:00")
utc_dt = parseTimeToUTC(time_string, "America/Los_Angeles")
#track = createTrackFromTemplateDistanced(template_verts,8,8,utc_dt)
track = createTrackFromTemplateTimed(template_verts,target_seconds,target_mph,utc_dt)
# Redirect output to a file if provided with one
if output_filename != "":
sys.stdout = open(output_filename,'w')
#dumpGPX("running", time_string, utc_dt, track)
dumpTCX("running", time_string, utc_dt, track, 143)
#print track
if __name__ == "__main__":
main(sys.argv[1:])
| mit | 234,825,387,619,238,530 | 31.576667 | 290 | 0.586105 | false | 3.036036 | false | false | false |
dgpt/GCMIS_scripts | master_off_list_advanced.py | 1 | 1308 | #$language = "python"
#$interface = "1.0"
import sys
def main():
    """ Enter a store number followed by the sessions to kill, separated by spaces; the script kills them and handles all possible scenarios.
    This version shows all sessions logged into the store at the end (info.us -l store#)"""
screen = crt.Screen
dlg = crt.Dialog
screen.Synchronous = True
userInput = dlg.Prompt("Please enter store number followed by sessions to kill (separated by spaces).", "MASTER OFF")
if userInput == "":
sys.exit("No sessions entered")
inputList = userInput.split(" ")
sessionList = inputList[1:]
store = inputList[0]
for session in sessionList:
screen.Send("\r")
screen.WaitForString("GCM>")
screen.Send("MASTER OFF " + session + "\r")
sResult = screen.WaitForStrings(["(CR)", "ERR", "Unable to get shared memory ID", "GCM>"], 2)
if sResult == 1:
screen.Send("\r")
screen.WaitForString("(Y/CR=N)")
screen.Send("Y\r")
screen.WaitForString("(Y/CR=N)")
screen.Send("Y\r")
if sResult == 2:
sys.exit("MASTER OFF ERROR")
screen.Send("TOP\rINFORM\r")
screen.WaitForString("GCM>")
screen.Send("info.us -l " + store + "\r")
screen.Synchronous = False
main()
| gpl-2.0 | -5,087,455,663,526,578,000 | 30.902439 | 121 | 0.597095 | false | 3.780347 | false | false | false |
dpaleino/new-osm-stats | osmstats/output/html.py | 1 | 1325 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2010-2011, David Paleino <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from datetime import datetime
from time import strftime
import os
from config import *
def make_footer():
today = strftime("%Y-%m-%d %H:%M:%S", datetime.today().timetuple())
f = open(os.path.join(html_path, 'timestamp.html'), 'w')
f.write('<i>'+today+'</i>')
f.close()
if '~dev' in version:
versionlink = ''
else:
versionlink = version
f = open(os.path.join(html_path, 'version.html'), 'w')
f.write('<a href="http://bugs.hanskalabs.net/projects/osm-stats/repository/show?rev=%s">%s</a>' % (versionlink, version))
f.close()
| gpl-3.0 | 6,173,986,325,670,334,000 | 32.948718 | 125 | 0.688822 | false | 3.511936 | false | false | false |
bdecoste/cct_module | datagrid-openshift/added/probes/probe/jdg/jolokia.py | 1 | 5968 | """
Copyright 2017 Red Hat, Inc.
Red Hat licenses this file to you under the Apache License, version
2.0 (the "License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License.
"""
import os
import re
from probe.api import Status, Test
from probe.jolokia import JolokiaProbe
class JdgProbe(JolokiaProbe):
"""
JDG probe which uses the Jolokia interface to query server state (i.e.
RESTful JMX queries). It defines tests for cache status, join status and
state transfer state for all caches. Note, some of these are not
accessible via DMR in JDG 6.5.
"""
def __init__(self):
super(JdgProbe, self).__init__(
[
CacheStatusTest(),
JoinStatusTest(),
StateTransferStateTest()
]
)
__nameGrabber = re.compile(r'.*name="([^"]*)"')
def getName(text):
return __nameGrabber.match(text).group(1)
class CacheStatusTest(Test):
"""
Checks the cache statuses.
"""
def __init__(self):
super(CacheStatusTest, self).__init__(
{
"type": "read",
"attribute": "cacheStatus",
"mbean": "jboss.infinispan:type=Cache,name=*,manager=\"clustered\",component=Cache"
}
)
def evaluate(self, results):
"""
Evaluates the test:
READY for "RUNNING"
NOT_READY for INITIALIZING OR INSTANTIATED
HARD_FAILURE for FAILED
FAILURE if the query itself failed, or all other states (STOPPING or TERMINATED)
"""
if results["status"] != 200:
return (Status.FAILURE, "Jolokia query failed")
if not results["value"]:
return (Status.READY, "No caches")
status = set()
messages = {}
for key, value in results["value"].items():
cacheStatus = value["cacheStatus"]
messages[getName(key)] = cacheStatus
if cacheStatus == "RUNNING":
status.add(Status.READY)
elif cacheStatus == "FAILED":
status.add(Status.HARD_FAILURE)
elif cacheStatus == "INITIALIZING":
status.add(Status.NOT_READY)
elif cacheStatus == "INSTANTIATED":
status.add(Status.NOT_READY)
else:
status.add(Status.FAILURE)
return (min(status), messages)
class JoinStatusTest(Test):
"""
Checks the join status of the caches.
"""
def __init__(self):
super(JoinStatusTest, self).__init__(
{
"type": "read",
"attribute": "joinComplete",
"mbean": "jboss.infinispan:type=Cache,name=*,manager=\"clustered\",component=StateTransferManager"
}
)
def evaluate(self, results):
"""
Evaluates the test:
READY if all caches have joined the cluster
NOT_READY if any caches have not joined the cluster
FAILURE if the query itself failed
"""
if results["status"] != 200:
return (Status.FAILURE, "Jolokia query failed")
if not results["value"]:
return (Status.READY, "No caches")
status = set()
messages = {}
for key, value in results["value"].items():
joinComplete = value["joinComplete"]
messages[getName(key)] = "JOINED" if joinComplete else "NOT_JOINED"
if joinComplete:
status.add(Status.READY)
else:
status.add(Status.NOT_READY)
return (min(status), messages)
class StateTransferStateTest(Test):
"""
Checks whether or not a state transfer is in progress (only initial state transfer).
"""
def __init__(self):
super(StateTransferStateTest, self).__init__(
{
"type": "read",
"attribute": "stateTransferInProgress",
"mbean": "jboss.infinispan:type=Cache,name=*,manager=\"clustered\",component=StateTransferManager"
}
)
self.stateTransferMarker = os.path.join(os.getenv("JBOSS_HOME", "/tmp"), "InitialStateTransferComplete.marker")
def evaluate(self, results):
"""
Evaluates the test:
READY if no state transfer is in progress or the marker file exists
NOT_READY if state transfer is in progress and marker file does not exist
"""
if results["status"] != 200:
return (Status.FAILURE, "Jolokia query failed")
if not results["value"]:
return (Status.READY, "No caches")
status = set()
messages = {}
for key, value in results["value"].items():
stateTransferInProgress = value["stateTransferInProgress"]
messages[getName(key)] = "IN_PROGRESS" if stateTransferInProgress else "COMPLETE"
if stateTransferInProgress:
status.add(Status.NOT_READY)
else:
status.add(Status.READY)
if os.path.exists(self.stateTransferMarker):
return (Status.READY, messages)
else:
status = min(status)
if status is Status.READY:
# create the marker file
try:
open(self.stateTransferMarker, 'a').close()
except:
# worst case we try again next time or go offline when a
# state transfer is initiated
pass
return (status, messages)
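# Illustrative sketch (not from the original module): how a single test evaluates a canned
# Jolokia response. The payload below is a hypothetical example of the dict passed in by the
# probe framework, not output captured from a real JDG server.
#
# fake_response = {
#     "status": 200,
#     "value": {
#         'jboss.infinispan:type=Cache,name="default",manager="clustered",component=Cache':
#             {"cacheStatus": "RUNNING"},
#     },
# }
# status, messages = CacheStatusTest().evaluate(fake_response)
# # -> status is Status.READY and messages == {'default': 'RUNNING'}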
| apache-2.0 | -6,897,469,345,434,861,000 | 32.340782 | 119 | 0.574564 | false | 4.372161 | true | false | false |
janusnic/initpy | initpy/templates/falcon.py | 3 | 1293 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from string import Template
app_init = Template("""
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import falcon
# from .middleware import *
from .resources import ${module_title}Resource
${module}Resource = ${module_title}Resource()
def create_app():
app = falcon.API(middleware=[])
app.add_route('/', ${module}Resource)
return app
""".strip())
manager = """
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref import simple_server
from app import create_app
# Set up falcon api
app = application = create_app()
if __name__ == '__main__':
httpd = simple_server.make_server('127.0.0.1', 5000, app)
httpd.serve_forever()
""".strip()
resource_init = Template("""
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from .${module} import ${module_title}Resource
""".strip())
resource_controller = Template("""
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import falcon
class ${module_title}Resource(object):
def __init__(self):
pass
def on_get(self, req, resp):
resp.status = falcon.HTTP_200
resp.body = 'Hello World'
def on_post(self, req, resp):
resp.status = falcon.HTTP_200
resp.body = 'Hello World'
""".strip())
requirements = """
falcon
""".strip()
| mit | -3,236,203,294,690,416,600 | 16.712329 | 61 | 0.617943 | false | 3.248744 | false | false | false |
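# Usage sketch for the templates in the entry above (a hypothetical "todo" module):
#
# print(app_init.substitute(module='todo', module_title='Todo'))
# print(resource_init.substitute(module='todo', module_title='Todo'))
# print(resource_controller.substitute(module_title='Todo'))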
jccotou/panther | setup/database/etl/processors/drug_response.py | 1 | 6694 | from decimal import Decimal
from setup.database.etl.data_sources.drug_information import DrugInformationDataSource
from setup.database.etl.data_sources.drug_response import DrugResponseDataSource
from setup.database.etl.processors.etl_processor import ETLProcessor
from setup.database.metadata.database import CCLEDatabase
class DrugResponseETLProcessor(ETLProcessor):
def __init__(self, dataset_id, cancer_cell_line_etl_processor):
super(self.__class__, self).__init__(dataset_id, [DrugInformationDataSource, DrugResponseDataSource],
null_value='NULL')
self.drug_responses = CCLEDatabase().drug_responses
self.drug_response_doses = CCLEDatabase().drug_response_doses
self.therapy_compounds = CCLEDatabase().therapy_compounds
self._cancer_cell_line_etl_processor = cancer_cell_line_etl_processor
def load(self):
self._load_therapy_compounds()
self._load_drug_responses()
def _load_therapy_compounds(self):
for row_number, row in self.extract(DrugInformationDataSource).iterrows():
self._load_therapy_compound(row)
def _load_therapy_compound(self, row):
tc = self.therapy_compounds
name = self._get_value_or_none_if_equals_null(row['Compound (code or generic name)'])
brand_name = self._get_value_or_none_if_equals_null(row['Compound (brand name)'])
mechanism_of_action = self._get_value_or_none_if_equals_null(row['Mechanism of action'])
drug_class = self._get_value_or_none_if_equals_null(row['Class'])
if drug_class is not None:
drug_class = drug_class.lower()
highest_phase = self._get_value_or_none_if_equals_null(row['Highest Phase'])
organization = self._get_value_or_none_if_equals_null(row['Organization'])
target = self._get_value_or_none_if_equals_null(row['Target(s)'])
self._insert_or_update_table_in_current_dataset_with_values_based_on_where_columns(
tc, {
tc.c.name: name,
tc.c.brandName: brand_name,
tc.c.mechanismOfAction: mechanism_of_action,
tc.c['class']: drug_class,
tc.c.highestPhase: highest_phase,
tc.c.organization: organization,
tc.c.target: target,
},
[tc.c.name]
)
def _load_drug_responses(self):
for row_number, row in self.extract(DrugResponseDataSource).iterrows():
self._load_drug_response(row)
def _load_drug_response(self, row):
r = self.drug_responses
ccle_cell_line_name = self._get_value_or_none_if_equals_null(row['CCLE Cell Line Name'])
cancer_cell_line_id = self._cancer_cell_line_etl_processor.get_cancer_cell_line_id_by_name(ccle_cell_line_name)
compound_name = self._get_value_or_none_if_equals_null(row['Compound'])
therapy_compound_id = self._get_compound_id_by_name(compound_name)
if therapy_compound_id is None:
self._add_therapy_compound_with_name(compound_name)
therapy_compound_id = self._get_compound_id_by_name(compound_name)
fit_type = self._get_value_or_none_if_equals_null(row['FitType'])
ec50_um = self._get_value_or_none_if_equals_null(row['EC50 (uM)']) or -1
ic50_um = self._get_value_or_none_if_equals_null(row['IC50 (uM)'])
a_max = self._get_value_or_none_if_equals_null(row['Amax'])
act_area = self._get_value_or_none_if_equals_null(row['ActArea'])
self._insert_or_update_table_in_current_dataset_with_values_based_on_where_columns(
r, {
r.c.fitType: fit_type,
r.c.ec50UM: ec50_um,
r.c.ic50UM: ic50_um,
r.c.aMax: a_max,
r.c.actArea: act_area,
r.c.CancerCellLines_idCancerCellLine: cancer_cell_line_id,
r.c.TherapyCompounds_idTherapyCompound: therapy_compound_id,
},
[r.c.CancerCellLines_idCancerCellLine, r.c.TherapyCompounds_idTherapyCompound]
)
self._load_drug_response_doses(row)
def _add_therapy_compound_with_name(self, name):
self._insert_or_update_table_in_current_dataset_with_values_based_on_where_columns(
self.therapy_compounds, {
self.therapy_compounds.c.name: name,
},
[self.therapy_compounds.c.name]
)
def _get_compound_id_by_name(self, name):
table = self.therapy_compounds
return self._get_id_by_column_values(table, {table.c.name: name})
def _load_drug_response_doses(self, row):
rd = self.drug_response_doses
doses = self._get_value_or_none_if_equals_null(row['Doses (uM)'])
activity_data = self._get_value_or_none_if_equals_null(row['Activity Data (median)'])
activity_sd = self._get_value_or_none_if_equals_null(row['Activity SD'])
ccle_cell_line_name = self._get_value_or_none_if_equals_null(row['CCLE Cell Line Name'])
cancer_cell_line_id = self._cancer_cell_line_etl_processor.get_cancer_cell_line_id_by_name(ccle_cell_line_name)
compound_name = self._get_value_or_none_if_equals_null(row['Compound'])
therapy_compound_id = self._get_compound_id_by_name(compound_name)
drug_response_id = self._get_drug_response_id_from_cancer_cell_line_id_and_therapy_compound_id(
cancer_cell_line_id, therapy_compound_id)
single_doses = doses.split(',')
single_activity_data = activity_data.split(',')
single_activity_sd = activity_sd.split(',')
for index in xrange(0, len(single_doses)):
single_dose = Decimal(single_doses[index])
single_ad = Decimal(single_activity_data[index])
single_sd = Decimal(single_activity_sd[index])
self._insert_or_update_table_in_current_dataset_with_values_based_on_where_columns(
rd, {
rd.c.doseUM: single_dose,
rd.c.activityMedian: single_ad,
rd.c.activitySD: single_sd,
rd.c.DrugResponses_idDrugResponse: drug_response_id
},
[rd.c.DrugResponses_idDrugResponse, rd.c.doseUM]
)
def _get_drug_response_id_from_cancer_cell_line_id_and_therapy_compound_id(
self, cancer_cell_line_id, therapy_compound_id):
table = self.drug_responses
return self._get_id_by_column_values(table, {
table.c.CancerCellLines_idCancerCellLine: cancer_cell_line_id,
table.c.TherapyCompounds_idTherapyCompound: therapy_compound_id
})
| gpl-3.0 | -26,876,317,098,191,264 | 46.475177 | 119 | 0.628025 | false | 3.226024 | false | false | false |
bbsadowsky/bbsadowsky | qinf.py | 1 | 1544 | #!python
# Solves for basic information from a standard form quadratic equation
# Copyright Bradley Sadowsky, 10/3/2016; MIT License
# qinf (Q)uadratic (IN)formation (F)inder
import cmath
import sys
def main():
# Take (floating point) input into form y = ax^2 + bx + c
a = float(input("A: "))
b = float(input("B: "))
c = float(input("C: "))
# Check if input is that of a quadratic
if a == 0:
sys.exit("InputError: Not a quadratic")
# Put equation into standard form display
standform = "y = " + str(a) + "x^2 + " + str(b) + "x + " + str(c)
# Solves for Y Intercept; y = c
yint = str(c)
# Solves for X Intercept(s); (-b {+|-} sqrt(b^2 - 4ac))/2a; See quad.py
# Solve discriminant
discriminant = cmath.sqrt((b**2)-(4*a*c))
# Solve solutions
    psol = (-b + discriminant)/(2*a) #Positive Solution
    nsol = (-b - discriminant)/(2*a) #Negative Solution
# Determines Vertex
# x = -b/2a; y = a(-b/2a)^2 + b(-b/2a) + c
    vx = -b/(2*a) # X of Vertex
vy = a*(vx**2) + b*vx + c # Y of Vertex
vert = "(" + str(vx) + ", " + str(vy) + ")"
# Display Information
print "\nYou have inputted the following equation: " + standform
print "The Vertex is: " + vert
print "The Y Intercept is: " + yint
print "The Positive Solution [X Intercept] is: " + str(psol)
print "The Negative Solution [X Intercept] is: " + str(nsol)
print "QInF [Quadratic INformation Finder] by Bradley Sadowsky, 10/3/2016; MIT License"
main()
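# Worked example: entering a=1, b=-3, c=2 gives y = x^2 - 3x + 2, so the script reports
# a Y Intercept of 2.0, a Vertex of (1.5, -0.25), and solutions x = 2 and x = 1
# (displayed as complex numbers, since cmath.sqrt is used for the discriminant).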
| mit | 5,574,299,784,683,715,000 | 32.565217 | 87 | 0.58614 | false | 2.98646 | false | false | false |
anselmobd/fo2 | src/lotes/queries/pedido/sortimento.py | 1 | 8366 | from utils.functions.models import GradeQtd
from utils.functions import arg_def
def sortimento(cursor, **kwargs):
def argdef(arg, default):
return arg_def(kwargs, arg, default)
pedido = argdef('pedido', None)
tipo_sort = argdef('tipo_sort', 'rc')
descr_sort = argdef('descr_sort', True)
modelo = argdef('modelo', None)
periodo = argdef('periodo', None)
    cancelado = argdef('cancelado', 't')  # default: all orders
    faturado = argdef('faturado', 't')  # default: all orders
    faturavel = argdef('faturavel', 't')  # default: all orders
total = argdef('total', None)
filtra_pedido = ''
if pedido is not None:
filtra_pedido = 'AND i.PEDIDO_VENDA = {}'.format(pedido)
if tipo_sort == 'rc':
sort_expression = "i.CD_IT_PE_GRUPO || ' - ' || i.CD_IT_PE_ITEM"
sort_group = "i.CD_IT_PE_GRUPO, i.CD_IT_PE_ITEM"
sort_name = 'Produto-Cor'
sort_name_plural = 'Produtos-Cores'
else: # if tipo_sort == 'c':
sort_expression = "i.CD_IT_PE_ITEM"
sort_group = "i.CD_IT_PE_ITEM"
sort_name = 'Cor'
sort_name_plural = 'Cores'
filtro_modelo = ''
if modelo is not None:
filtro_modelo = '''--
AND TRIM(LEADING '0' FROM
(REGEXP_REPLACE(i.CD_IT_PE_GRUPO,
'^[abAB]?([^a-zA-Z]+)[a-zA-Z]*$', '\\1'
))) = '{}' '''.format(modelo)
filtra_periodo = ''
if periodo is not None:
periodo_list = periodo.split(':')
if periodo_list[0] != '':
filtra_periodo += '''
AND ped.DATA_ENTR_VENDA > CURRENT_DATE + {}
'''.format(periodo_list[0])
if periodo_list[1] != '':
filtra_periodo += '''
AND ped.DATA_ENTR_VENDA <= CURRENT_DATE + {}
'''.format(periodo_list[1])
filtro_cancelado = ''
    if cancelado in ['n', 'a']:  # not cancelled / active
filtro_cancelado = '''--
AND ped.STATUS_PEDIDO <> 5 -- não cancelado
'''
    elif cancelado in ['c', 'i']:  # cancelled / inactive
filtro_cancelado = '''--
AND ped.STATUS_PEDIDO = 5 -- cancelado
'''
filtro_faturado = ''
    if faturado == 'f':  # invoiced
filtro_faturado = '''--
AND f.NUM_NOTA_FISCAL IS NOT NULL -- faturado
'''
    elif faturado == 'n':  # not invoiced
filtro_faturado = '''--
AND f.NUM_NOTA_FISCAL IS NULL -- não faturado
'''
filtro_faturavel = ''
    if faturavel == 'f':  # invoiceable
filtro_faturavel = """--
AND fok.NUM_NOTA_FISCAL IS NULL"""
    elif faturavel == 'n':  # not invoiceable
filtro_faturavel = """--
AND fok.NUM_NOTA_FISCAL IS NOT NULL"""
grade_args = {}
if total is not None:
grade_args = {
'total': total,
'forca_total': True,
}
    # Order grid
grade = GradeQtd(cursor)
    # sizes
grade.col(
id='TAMANHO',
name='Tamanho',
**grade_args,
sql='''
SELECT DISTINCT
i.CD_IT_PE_SUBGRUPO TAMANHO
, t.ORDEM_TAMANHO
FROM PEDI_110 i -- item de pedido de venda
JOIN PEDI_100 ped -- pedido de venda
ON ped.PEDIDO_VENDA = i.PEDIDO_VENDA
LEFT JOIN FATU_050 f -- fatura
ON f.PEDIDO_VENDA = ped.PEDIDO_VENDA
AND f.SITUACAO_NFISC <> 2 -- cancelada
LEFT JOIN FATU_050 fok -- fatura
ON fok.PEDIDO_VENDA = ped.PEDIDO_VENDA
AND fok.SITUACAO_NFISC <> 2 -- cancelada
LEFT JOIN BASI_220 t -- tamanhos
ON t.TAMANHO_REF = i.CD_IT_PE_SUBGRUPO
WHERE 1=1
{filtra_pedido} -- filtra_pedido
{filtro_modelo} -- filtro_modelo
{filtra_periodo} -- filtra_periodo
{filtro_cancelado} -- filtro_cancelado
{filtro_faturado} -- filtro_faturado
{filtro_faturavel} -- filtro_faturavel
ORDER BY
t.ORDEM_TAMANHO
'''.format(
filtra_pedido=filtra_pedido,
filtro_modelo=filtro_modelo,
filtra_periodo=filtra_periodo,
filtro_cancelado=filtro_cancelado,
filtro_faturado=filtro_faturado,
filtro_faturavel=filtro_faturavel,
)
)
    # colors
sql = '''
SELECT
{sort_expression} SORTIMENTO
'''
if descr_sort:
sql += '''
, {sort_expression} || ' - ' ||
max( rtc.DESCRICAO_15 ) DESCR
'''
else:
sql += '''
, {sort_expression} DESCR
'''
sql += '''
FROM PEDI_110 i -- item de pedido de venda
JOIN PEDI_100 ped -- pedido de venda
ON ped.PEDIDO_VENDA = i.PEDIDO_VENDA
LEFT JOIN FATU_050 f -- fatura
ON f.PEDIDO_VENDA = ped.PEDIDO_VENDA
AND f.SITUACAO_NFISC <> 2 -- cancelada
LEFT JOIN FATU_050 fok -- fatura
ON fok.PEDIDO_VENDA = ped.PEDIDO_VENDA
AND fok.SITUACAO_NFISC <> 2 -- cancelada
'''
if descr_sort:
sql += '''
JOIN BASI_010 rtc -- item (ref+tam+cor)
on rtc.NIVEL_ESTRUTURA = i.CD_IT_PE_NIVEL99
AND rtc.GRUPO_ESTRUTURA = i.CD_IT_PE_GRUPO
AND rtc.SUBGRU_ESTRUTURA = i.CD_IT_PE_SUBGRUPO
AND rtc.ITEM_ESTRUTURA = i.CD_IT_PE_ITEM
'''
sql += '''
WHERE 1=1
{filtra_pedido} -- filtra_pedido
{filtro_modelo} -- filtro_modelo
{filtra_periodo} -- filtra_periodo
{filtro_cancelado} -- filtro_cancelado
{filtro_faturado} -- filtro_faturado
{filtro_faturavel} -- filtro_faturavel
GROUP BY
{sort_group} -- sort_group
ORDER BY
2
'''
sql = sql.format(
filtra_pedido=filtra_pedido,
filtro_modelo=filtro_modelo,
filtra_periodo=filtra_periodo,
filtro_cancelado=filtro_cancelado,
filtro_faturado=filtro_faturado,
filtro_faturavel=filtro_faturavel,
sort_expression=sort_expression,
sort_group=sort_group,
)
grade.row(
id='SORTIMENTO',
facade='DESCR',
name=sort_name,
name_plural=sort_name_plural,
**grade_args,
sql=sql
)
    # assortment quantities
grade.value(
id='QUANTIDADE',
sql='''
SELECT
{sort_expression} SORTIMENTO
, i.CD_IT_PE_SUBGRUPO TAMANHO
, sum(i.QTDE_PEDIDA) QUANTIDADE
FROM PEDI_110 i -- item de pedido de venda
JOIN PEDI_100 ped -- pedido de venda
ON ped.PEDIDO_VENDA = i.PEDIDO_VENDA
LEFT JOIN FATU_050 f -- fatura
ON f.PEDIDO_VENDA = ped.PEDIDO_VENDA
AND f.SITUACAO_NFISC <> 2 -- cancelada
LEFT JOIN FATU_050 fok -- fatura
ON fok.PEDIDO_VENDA = ped.PEDIDO_VENDA
AND fok.SITUACAO_NFISC <> 2 -- cancelada
WHERE 1=1
{filtra_pedido} -- filtra_pedido
{filtro_modelo} -- filtro_modelo
{filtra_periodo} -- filtra_periodo
{filtro_cancelado} -- filtro_cancelado
{filtro_faturado} -- filtro_faturado
{filtro_faturavel} -- filtro_faturavel
GROUP BY
{sort_group} -- sort_group
, i.CD_IT_PE_SUBGRUPO
'''.format(
filtra_pedido=filtra_pedido,
filtro_modelo=filtro_modelo,
filtra_periodo=filtra_periodo,
filtro_cancelado=filtro_cancelado,
filtro_faturado=filtro_faturado,
filtro_faturavel=filtro_faturavel,
sort_expression=sort_expression,
sort_group=sort_group,
)
)
fields = grade.table_data['fields']
data = grade.table_data['data']
if total is None:
result = (
grade.table_data['header'],
fields,
data,
grade.total,
)
else:
result = (
grade.table_data['header'],
fields,
data,
grade.table_data['style'],
grade.total,
)
return result
| mit | -3,758,643,150,027,135,500 | 32.047431 | 76 | 0.518718 | false | 3.145598 | false | false | false |
Eureka22/ASM_xf | PythonD/site_python/twisted/python/context.py | 2 | 2456 | # -*- test-case-name: twisted.test.test_context -*-
# Twisted, the Framework of Your Internet
# Copyright (C) 2001-2002 Matthew W. Lefkowitz
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
defaultContextDict = {}
setDefault = defaultContextDict.__setitem__
class ContextTracker:
def __init__(self):
self.contexts = [defaultContextDict]
def callWithContext(self, ctx, func, *args, **kw):
newContext = self.contexts[-1].copy()
newContext.update(ctx)
self.contexts.append(newContext)
try:
return func(*args,**kw)
finally:
self.contexts.pop()
def getContext(self, key, default=None):
return self.contexts[-1].get(key, default)
class ThreadedContextTracker:
def __init__(self):
import thread
self.threadId = thread.get_ident
self.contextPerThread = {}
def currentContext(self):
tkey = self.threadId()
if not self.contextPerThread.has_key(tkey):
self.contextPerThread[tkey] = ContextTracker()
return self.contextPerThread[tkey]
def callWithContext(self, ctx, func, *args, **kw):
return self.currentContext().callWithContext(ctx, func, *args, **kw)
def getContext(self, key, default=None):
return self.currentContext().getContext(key, default)
def installContextTracker(ctr):
global theContextTracker
global call
global get
theContextTracker = ctr
call = theContextTracker.callWithContext
get = theContextTracker.getContext
def initThreads():
newContextTracker = ThreadedContextTracker()
newContextTracker.contextPerThread[newContextTracker.threadId()] = theContextTracker
installContextTracker(newContextTracker)
installContextTracker(ContextTracker())
import threadable
threadable.whenThreaded(initThreads)
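# Illustrative usage (not from the original module): a value set with callWithContext is
# visible only to code running inside that call.
#
# def _report():
#     return get("request_id", "none")
#
# call({"request_id": 42}, _report)   # -> 42
# get("request_id", "none")           # -> "none" outside the call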
| gpl-2.0 | -9,052,679,563,768,801,000 | 31.315789 | 88 | 0.709691 | false | 4.02623 | false | false | false |
mxmaslin/Test-tasks | tests_django/apps/loans/migrations/0001_initial.py | 1 | 4400 | # Generated by Django 2.1.1 on 2018-09-29 18:12
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Offer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),
('modified', models.DateTimeField(auto_now=True, verbose_name='Дата изменения')),
('rotation_started', models.DateTimeField(blank=True, null=True, verbose_name='Дата начала ротации')),
('rotation_ended', models.DateTimeField(blank=True, null=True, verbose_name='Дата окончания ротации')),
('offer_name', models.CharField(max_length=255, verbose_name='Название предложения')),
('offer_type', models.PositiveSmallIntegerField(choices=[(0, 'Customer'), (1, 'Mortgage'), (2, 'Car'), (3, 'Business')], default=0, verbose_name='Тип предложения')),
('score_min', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='Мин. скоринговый балл')),
('score_max', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='Макс. скоринговый балл')),
('bank', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Предложение по кредиту',
'verbose_name_plural': 'Предложения по кредитам',
'ordering': ('-created',),
},
),
migrations.CreateModel(
name='Questionnaire',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),
('modified', models.DateTimeField(auto_now=True, verbose_name='Дата изменения')),
('name', models.CharField(max_length=255, verbose_name='ФИО')),
('birthday', models.DateField(blank=True, null=True, verbose_name='Дата рождения')),
('phone', models.CharField(blank=True, max_length=10, null=True, verbose_name='Телефон')),
('passport', models.CharField(blank=True, max_length=255, null=True, verbose_name='Паспорт')),
('score', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='Скоринговый балл')),
],
options={
'verbose_name': 'Анкета клиента',
'verbose_name_plural': 'Анкеты клиентов',
'ordering': ('-created',),
},
),
migrations.CreateModel(
name='Submission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),
('submitted', models.DateTimeField(blank=True, null=True, verbose_name='Дата отправки')),
('status', models.PositiveSmallIntegerField(choices=[(0, 'New'), (1, 'Sent'), (2, 'Received')], default=0, verbose_name='Статус заявки')),
('offer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='loans.Offer', verbose_name='Предложение')),
('questionnaire', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='loans.Questionnaire', verbose_name='Анкета')),
],
options={
'verbose_name': 'Заявка на кредит',
'verbose_name_plural': 'Заявки на кредиты',
'ordering': ('-created',),
},
),
]
| gpl-3.0 | -5,729,019,905,014,535,000 | 56.028169 | 181 | 0.594221 | false | 3.362957 | false | false | false |
labordoc/labordoc-next | modules/bibformat/lib/elements/bfe_ILO_authors_publ.py | 1 | 5979 | ## -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints authors
"""
__revision__ = "$Id$"
def format_element(bfo, limit, separator=' ; ',
extension='[...]',
print_links="no",
print_affiliations='no',
affiliation_prefix = ' (',
affiliation_suffix = ')',
interactive="no",
highlight="no"):
"""
Prints the list of authors of a record.
@param limit: the maximum number of authors to display
@param separator: the separator between authors.
@param extension: a text printed if more authors than 'limit' exist
@param print_links: if yes, prints the authors as HTML link to their publications
@param print_affiliations: if yes, make each author name followed by its affiliation
@param affiliation_prefix: prefix printed before each affiliation
@param affiliation_suffix: suffix printed after each affiliation
@param interactive: if yes, enable user to show/hide authors when there are too many (html + javascript)
@param highlight: highlights authors corresponding to search query if set to 'yes'
"""
from urllib import quote
from cgi import escape
from invenio.config import CFG_SITE_URL
from invenio.messages import gettext_set_language
import re
_ = gettext_set_language(bfo.lang) # load the right message language
authors = []
authors_1 = bfo.fields('1001_')
authors_2 = bfo.fields('7001_')
authors_3 = bfo.fields('1101_')
authors_4 = bfo.fields('1102_')
authors_5 = bfo.fields('7102_')
authors_6 = bfo.fields('7101_')
authors.extend(authors_1)
authors.extend(authors_2)
authors.extend(authors_3)
authors.extend(authors_4)
authors.extend(authors_5)
authors.extend(authors_6)
nb_authors = len(authors)
# Process authors to add link, highlight and format affiliation
for author in authors:
if author.has_key('a'):
if highlight == 'yes':
from invenio import bibformat_utils
author['a'] = bibformat_utils.highlight(author['a'],
bfo.search_pattern)
# VS hack to take away links from corporate authors
if print_links.lower() == "yes":
if author['a'].startswith('CORP'):
author['a'] = re.sub('^CORP', '', author['a'])
else:
author['a'] = '<a class="detailsAuthors" href="' + CFG_SITE_URL + \
'/search?f=author&p='+ quote(author['a']) + \
'&ln='+ bfo.lang + \
'">'+escape(author['a'])+'</a>'
if author.has_key('u'):
if print_affiliations == "yes":
author['u'] = affiliation_prefix + author['u'] + \
affiliation_suffix
# Flatten author instances
if print_affiliations == 'yes':
authors = [author.get('a', '') + author.get('u', '')
for author in authors]
else:
authors = [author.get('a', '')
for author in authors]
if limit.isdigit() and nb_authors > int(limit) and interactive != "yes":
return separator.join(authors[:int(limit)]) + extension
elif limit.isdigit() and nb_authors > int(limit) and interactive == "yes":
out = '''
<script type="text/javascript">
function toggle_authors_visibility(){
var more = document.getElementById('more');
var link = document.getElementById('link');
var extension = document.getElementById('extension');
if (more.style.display=='none'){
more.style.display = '';
extension.style.display = 'none';
link.innerHTML = "%(show_less)s"
} else {
more.style.display = 'none';
extension.style.display = '';
link.innerHTML = "%(show_more)s"
}
link.style.color = "rgb(204,0,0);"
}
function set_up(){
var extension = document.getElementById('extension');
extension.innerHTML = "%(extension)s";
toggle_authors_visibility();
}
</script>
'''%{'show_less':_("Hide"),
'show_more':_("Show all %i authors") % nb_authors,
'extension':extension}
out += '<a name="show_hide" />'
out += separator.join(authors[:int(limit)])
out += '<span id="more" style="">' + separator + \
separator.join(authors[int(limit):]) + '</span>'
out += ' <span id="extension"></span>'
out += ' <small><i><a class="detailsAuthors" id="link" href="#" onclick="toggle_authors_visibility()" style="color:rgb(204,0,0);"></a></i></small>'
out += '<script type="text/javascript">set_up()</script>'
return out
elif nb_authors > 0:
return separator.join(authors)
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
| gpl-2.0 | -1,766,203,646,659,372,800 | 38.596026 | 155 | 0.578859 | false | 4.186975 | false | false | false |
uw-it-aca/course-dashboards | coursedashboards/views/page.py | 1 | 2962 | import json
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponseRedirect
from django.shortcuts import render
from coursedashboards.dao.user import get_current_user
from coursedashboards.dao.term import (
get_current_coda_term, get_given_and_previous_quarters)
from coursedashboards.dao.exceptions import MissingNetIDException
from coursedashboards.models import Term, Instructor, CourseOffering
from django.contrib.auth import logout as django_logout
LOGOUT_URL = "/user_logout"
HISTORIC_TERM_COUNT = 12
def page(request,
context={},
template='course-page.html'):
try:
user = get_current_user()
context["user"] = {
"netid": user.uwnetid,
"session_key": request.session.session_key,
}
except MissingNetIDException:
# below is placeholder if login fails...
# should log and return something useful
# log_invalid_netid_response(logger, timer)
        return "nope"  # invalid_session()
context["home_url"] = "/"
context["err"] = None
if ('year' in context and context['year'] and
'quarter' in context and context['quarter']):
cur_term, created = Term.objects.get_or_create(
year=context['year'], quarter=context['quarter'])
else:
cur_term = get_current_coda_term(request)
if cur_term is None:
context["err"] = "No current quarter data!"
else:
context["year"] = cur_term.year
context["quarter"] = cur_term.quarter
try:
sections = []
historical = {}
for sws_term in get_given_and_previous_quarters(
"{},{}".format(cur_term.year, cur_term.quarter),
HISTORIC_TERM_COUNT + 1):
term, created = Term.objects.get_or_create(
year=sws_term.year, quarter=sws_term.quarter)
courses = Instructor.objects.filter(
user=user, term=term).values_list('course_id', flat=True)
offerings = CourseOffering.objects.filter(
course_id__in=list(courses), term=term)
for offering in offerings:
course_label = str(offering)
sections.append(offering.brief_json_object())
historical[course_label] = {}
context['sections'] = json.dumps(sections, cls=DjangoJSONEncoder)
context['historic_sections'] = json.dumps(
historical, cls=DjangoJSONEncoder)
if len(sections) == 0:
context['no_courses'] = True
except Instructor.DoesNotExist:
context['no_courses'] = True
return render(request, template, context)
def user_login(request):
return HttpResponseRedirect(request.GET.get('next', '/'))
def logout(request):
# Expires current myuw session
django_logout(request)
# Redirects to weblogin logout page
return HttpResponseRedirect(LOGOUT_URL)
| apache-2.0 | 6,265,254,528,811,069,000 | 32.659091 | 73 | 0.63133 | false | 4.002703 | false | false | false |
gkunter/coquery | coquery/gui/pyqt_compat.py | 1 | 2132 | # -*- coding: utf-8 -*-
"""
pyqt_compat.py is part of Coquery.
Copyright (c) 2016-2018 Gero Kunter ([email protected])
Coquery is released under the terms of the GNU General Public License (v3).
For details, see the file LICENSE that you should have received along
with Coquery. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import unicode_literals
import sys
import warnings
from PyQt5 import QtCore, QtGui, QtWidgets
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
QtCore.Property = QtCore.pyqtProperty
QtCore.QString = str
pyside = False
pyqt = False
class CoqSettings(QtCore.QSettings):
def value(self, key, default=None):
try:
val = super(CoqSettings, self).value(key, default)
except Exception as e:
s = "Exception when requesting setting key '{}': {}".format(
key, e)
print(s)
warnings.warn(s)
val = default
return val
def QWebView(*args, **kwargs):
import PyQt5.QtWebKit as QtWebKit
return QtWebKit.QWebView(*args, **kwargs)
if sys.platform == 'win32':
frameShadow = QtWidgets.QFrame.Raised
frameShape = QtWidgets.QFrame.Panel
else:
frameShadow = QtWidgets.QFrame.Raised
frameShape = QtWidgets.QFrame.StyledPanel
def tr(*args, **kwargs):
return QtWidgets.QApplication.instance().translate(*args, **kwargs)
def get_toplevel_window(name="MainWindow"):
"""
Retrieves the top-level widget with the given name. By default, retrieve
the main window.
"""
for widget in QtWidgets.qApp.topLevelWidgets():
        if widget.objectName() == name:
return widget
return None
def close_toplevel_widgets():
"""
Closes all top-level widgets.
"""
for widget in QtWidgets.qApp.topLevelWidgets():
if widget.objectName() != "MainWindow":
widget.hide()
widget.close()
del widget
STYLE_WARN = 'QLineEdit {background-color: lightyellow; }'
COLOR_NAMES = {QtGui.QColor(name).name().lower(): name for name
in QtGui.QColor.colorNames()}
| gpl-3.0 | 4,065,255,312,328,707,600 | 24.686747 | 76 | 0.658068 | false | 3.820789 | false | false | false |
dhondta/tinyscript | tests/test_timing.py | 1 | 1536 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""Timing module assets' tests.
"""
import time
from tinyscript.helpers.timeout import TimeoutError
from tinyscript.timing import set_time_items
from utils import *
args.stats = True
args.timings = True
set_time_items(globals())
class TestTiming(TestCase):
def test_step_setup(self):
g = globals().keys()
self.assertTrue(args.stats)
self.assertTrue(args.timings)
self.assertIn("get_time", g)
self.assertIn("get_time_since_last", g)
self.assertIn("Timer", g)
def test_time_manager(self):
with Timer() as t:
pass
self.assertFalse(time_manager.stats())
def test_timer_object(self):
temp_stdout(self)
with Timer(timeout=1, fail_on_timeout=True) as timer:
self.assertTrue(timer.fail)
self.assertTrue(timer.descr)
self.assertTrue(timer.message)
self.assertTrue(timer.start)
self.assertEqual(timer.timeout, 1)
self.assertRaises(TimeoutError, timer._handler, None, None)
time.sleep(1)
def timeout_test():
with Timer(timeout=1) as t:
time.sleep(2)
self.assertRaises(TimeoutError, timeout_test)
def test_timing_functions(self):
temp_stdout(self)
self.assertFalse(get_time())
self.assertFalse(get_time("test"))
self.assertFalse(get_time_since_last())
self.assertFalse(get_time_since_last("test"))
| agpl-3.0 | -1,160,874,004,725,543,400 | 26.927273 | 71 | 0.611979 | false | 3.888608 | true | false | false |
google/novm | novm/memory.py | 3 | 1855 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Memory devices.
"""
import os
import tempfile
from . import device
from . import utils
class UserMemory(device.Driver):
driver = "user-memory"
def create(self,
size=None,
fd=None,
**kwargs):
# No file given?
if fd is None:
with tempfile.NamedTemporaryFile() as tf:
fd = os.dup(tf.fileno())
utils.clear_cloexec(fd)
# No size given? Default to file size.
if size is None:
fd_stat = os.fstat(fd)
size = fd_stat.st_size
# Truncate the file.
os.ftruncate(fd, size)
return super(UserMemory, self).create(data={
"fd": fd,
"size": size,
}, **kwargs)
def save(self, state, pid):
""" Open up the fd and return it back. """
return ({
# Save the size of the memory block.
"size": state.get("size"),
}, {
# Serialize the entire open fd.
"memory": open("/proc/%d/fd/%d" % (pid, state["fd"]), "r")
})
def load(self, state, files):
return self.create(
size=state.get("size"),
fd=files["memory"].fileno())
device.Driver.register(UserMemory)
| apache-2.0 | -316,468,238,062,058,200 | 27.106061 | 74 | 0.58814 | false | 4.023861 | false | false | false |
beernarrd/gramps | gramps/gen/filters/rules/person/_islessthannthgenerationancestorofdefaultperson.py | 3 | 3143 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .. import Rule
#-------------------------------------------------------------------------
#
# IsLessThanNthGenerationAncestorOfDefaultPerson
#
#-------------------------------------------------------------------------
class IsLessThanNthGenerationAncestorOfDefaultPerson(Rule):
# Submitted by Wayne Bergeron
"""Rule that checks for a person that is an ancestor of the default person
not more than N generations away"""
labels = [ _('Number of generations:') ]
name = _('Ancestors of the default person '
'not more than <N> generations away')
category = _('Ancestral filters')
description = _("Matches ancestors of the default person "
"not more than N generations away")
def prepare(self, db, user):
self.db = db
self.map = set()
p = db.get_default_person()
if p:
self.def_handle = p.get_handle()
self.apply = self.apply_real
self.init_ancestor_list(self.def_handle, 1)
else:
self.apply = lambda db,p: False
def init_ancestor_list(self, handle, gen):
# if p.get_handle() in self.map:
# loop_error(self.orig,p)
if not handle:
return
if gen:
self.map.add(handle)
if gen >= int(self.list[0]):
return
p = self.db.get_person_from_handle(handle)
fam_id = p.get_main_parents_family_handle()
fam = self.db.get_family_from_handle(fam_id)
if fam:
f_id = fam.get_father_handle()
m_id = fam.get_mother_handle()
if f_id:
self.init_ancestor_list(f_id, gen+1)
if m_id:
self.init_ancestor_list(m_id, gen+1)
def apply_real(self,db,person):
return person.handle in self.map
def reset(self):
self.map.clear()
| gpl-2.0 | 996,183,622,077,085,600 | 33.922222 | 79 | 0.538339 | false | 4.305479 | false | false | false |
vasyabigi/ds-flow | dsflow/tasks.py | 1 | 4015 | from __future__ import with_statement
import json
from datetime import datetime
from fabric.api import local, prompt, task, quiet
from fabric.colors import green, cyan, red
from fabric.contrib.console import confirm
from settings import GITHUB, UPSTREAM_ONLY, BRANCH_FORMAT_STRING, GIT_REMOTE_NAME, GIT_DEFAULT_BASE
from utils import get_commit_message, get_branch_name, post
@task(alias="ci")
def commit(message=None, amend=False, add_first=False):
git_status = local('git status --short', capture=True)
if not git_status:
print(cyan('Nothing to commit.'))
return
if add_first:
local("git add .")
print(cyan('Review git status:'))
local('git status --short')
prompt(cyan('Press <Enter> to continue or <Ctrl+C> to cancel.'))
# Default command
command = 'git commit'
if amend:
command += " --amend"
else:
# Check if message present
while not message:
message = prompt(green("Enter commit message: "))
command += ' -m "%s"' % get_commit_message(message=message)
if not local("git diff --cached", capture=True):
print(red("Your commit is empty. Please add something and try again."))
else:
local(command)
if amend:
        print(cyan("Committed with amend."))
else:
        print(cyan("Committed with message: " + get_commit_message(message=message)))
@task
def push(force=False, need_rebase=False, base=GIT_DEFAULT_BASE):
if need_rebase:
rebase()
print(cyan("Pushing..."))
if UPSTREAM_ONLY:
command = 'git push %s %s:%s' % (
GIT_REMOTE_NAME, get_branch_name(), base)
else:
command = 'git push origin %s' % get_branch_name()
# Check if force commit is necessary
if force:
command += " --force"
local(command)
print(cyan("Pushed."))
@task(alias='pr')
def pull_request(message=None, base=GIT_DEFAULT_BASE):
print(cyan("Sending pull request to %s/%s." % (GIT_REMOTE_NAME, base)))
if confirm(green('Default message: %s' % get_commit_message(message=message))):
title = get_commit_message(message=message)
else:
title = get_commit_message(message=prompt(green("Enter message: ")))
data = {
"title": title,
"body": "",
"head": "{user}:{branch}".format(user=GITHUB['user'], branch=get_branch_name()),
"base": base
}
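    # The GitHub API returns 201 when the pull request is created and 422
    # (validation failed) when one already exists for this branch.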
response = post(url=GITHUB['urls']['pull_request'], data=json.dumps(data))
if response.status_code == 201:
print(cyan("Pull Request was sent to %s/%s." % (GIT_REMOTE_NAME, base)))
elif response.status_code == 422:
print(cyan("Pull-request was sent before."))
else:
print(response)
@task
def reset(base=GIT_DEFAULT_BASE):
local("git fetch %s" % GIT_REMOTE_NAME)
local("git reset --hard %s/%s" % (GIT_REMOTE_NAME, base))
@task
def rebase(base=GIT_DEFAULT_BASE):
print(cyan("Rebasing..."))
local("git fetch %s" % GIT_REMOTE_NAME)
local("git rebase %s/%s" % (GIT_REMOTE_NAME, base))
print(cyan("Rebase finished."))
@task
def change(number, branch_format_string=BRANCH_FORMAT_STRING, base=GIT_DEFAULT_BASE):
with quiet():
branch_name = branch_format_string.format(
datetime=datetime.now(), branch_name=number)
local("git branch %s" % branch_name)
local("git checkout %s" % branch_name)
print(cyan("Changed to %s." % get_branch_name()))
if confirm(green("Do you want to reset current branch?")):
reset(base=base)
print(cyan("Got last changes from %s." % GIT_REMOTE_NAME))
@task
def finish(message=None, force=False, need_rebase=False, add_first=False, base=GIT_DEFAULT_BASE):
commit(message=message, add_first=add_first)
    push(force=force, need_rebase=need_rebase, base=base)
if not UPSTREAM_ONLY:
pull_request(message=message, base=base)
@task
def fix(base=GIT_DEFAULT_BASE):
change(number="quick-fix", prefix="", base=base)
| mit | -3,058,465,067,527,867,000 | 27.076923 | 99 | 0.628892 | false | 3.50655 | false | false | false |
orezahc/IRProjectAPIServer | app/main.py | 1 | 1759 | # Imports
import asyncio
import tornado
import tornado.web
import tornado.platform.asyncio
import googlehandler
import yelphandler
import bunyan
import logging
import os
import sys
import argparse
_PRETTY_FORMAT = '%(asctime)s :: %(levelname)s :: %(name)s :: %(message)s'
_logger = logging.getLogger(__name__)
# Setup
def _setup(key):
'''
Sets up web routes handler.
'''
# Set up logger
logHandler = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter(_PRETTY_FORMAT)
logHandler.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(logHandler)
logger.setLevel(10)
# Set up tornado to use the asyncio event loop.
mainLoop = tornado.platform.asyncio.AsyncIOMainLoop().install()
ioloop = asyncio.get_event_loop()
print(key)
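    # The dict in the third position of a route tuple is passed to the
    # handler's initialize() method, which is how GoogleHandler receives the
    # API key.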
app = tornado.web.Application([
(r"/api/google/(?P<restaurantName>.*)", googlehandler.GoogleHandler, dict(key=key)),
(r"/api/yelp/(?P<restaurantName>.*)", yelphandler.YelpHandler)
])
app.listen(80)
# Go!
logging.getLogger(__name__).info('Entering IO loop.')
ioloop.run_forever()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Search API'
)
parser.add_argument(
'-log-level',
type=int,
default=logging.INFO,
choices=[
logging.DEBUG,
logging.INFO,
logging.WARNING,
logging.ERROR,
logging.CRITICAL
],
help='The logging message threshold.'
)
parser.add_argument(
'-secret-key',
type=str,
help='Api key.'
)
args = parser.parse_args()
_setup(args.secret_key)
| mit | -5,078,912,192,931,598,000 | 21.844156 | 92 | 0.629335 | false | 3.832244 | false | false | false |
NitorCreations/nitor-deploy-tools | n_utils/cli.py | 1 | 46343 | # Copyright 2016-2017 Nitor Creations Oy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Command line tools for nitor-deploy-tools
"""
from __future__ import print_function
from builtins import input
from builtins import str
import argparse
import json
import locale
import os
import sys
import time
import re
import inspect
from datetime import datetime, timedelta
from dateutil.parser import parse
from dateutil.tz import tzutc
from inspect import trace, getframeinfo
from subprocess import PIPE, Popen
import argcomplete
import yaml
from argcomplete.completers import ChoicesCompleter, FilesCompleter
from pygments import highlight, lexers, formatters
from pygments.styles import get_style_by_name
from . import aws_infra_util
from . import cf_bootstrap
from . import cf_deploy
from . import cf_utils
from . import volumes
from .cf_utils import InstanceInfo, is_ec2, region, regions, stacks, \
stack_params_and_outputs, get_images, promote_image, \
share_to_another_region, set_region, register_private_dns, interpolate_file, \
assumed_role_name
from .cloudfront_utils import distributions, distribution_comments, \
upsert_cloudfront_records
from n_utils.ecr_utils import ensure_repo, repo_uri
from n_utils.log_events import CloudWatchLogsGroups, CloudFormationEvents, CloudWatchLogsThread
from n_utils.maven_utils import add_server
from n_utils.mfa_utils import mfa_add_token, mfa_delete_token, mfa_generate_code, \
mfa_generate_code_with_secret, list_mfa_tokens, mfa_backup_tokens, mfa_decrypt_backup_tokens, \
mfa_to_qrcode
from n_utils.account_utils import list_created_accounts, create_account
from n_utils.aws_infra_util import load_parameters
from n_utils.ndt import find_include, find_all_includes, include_dirs
from n_utils.profile_util import update_profile, print_profile
from n_utils.ndt_project import list_jobs, list_components
from n_utils.git_utils import Git
from n_utils.ndt_project import Project
SYS_ENCODING = locale.getpreferredencoding()
NoneType = type(None)
def get_parser(formatter=None):
func_name = inspect.stack()[1][3]
caller = sys._getframe().f_back
func = caller.f_locals.get(
func_name, caller.f_globals.get(
func_name
)
)
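    # Resolve the calling function object so its docstring can be used as the
    # parser description below.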
if formatter:
return argparse.ArgumentParser(formatter_class=formatter, description=func.__doc__)
else:
return argparse.ArgumentParser(description=func.__doc__)
def list_file_to_json():
""" Convert a file with an entry on each line to a json document with
    a single element (name as argument) containing the file rows as a list.
"""
parser = get_parser()
parser.add_argument("arrayname", help="The name in the json object given" +
"to the array").completer = \
ChoicesCompleter(())
parser.add_argument("file", help="The file to parse").completer = \
FilesCompleter()
argcomplete.autocomplete(parser)
args = parser.parse_args()
if not os.path.isfile(args.file):
parser.error(args.file + " not found")
content = [line.rstrip('\n') for line in open(args.file)]
json.dump({args.arrayname: content}, sys.stdout)
def add_deployer_server():
"""Add a server into a maven configuration file. Password is taken from the
environment variable 'DEPLOYER_PASSWORD'
"""
parser = get_parser()
parser.add_argument("file", help="The file to modify").completer = \
FilesCompleter()
parser.add_argument("username",
help="The username to access the server.").completer = \
ChoicesCompleter(())
parser.add_argument("--id", help="Optional id for the server. Default is" +
" deploy. One server with this id is " +
"added and another with '-release' " +
"appended", default="deploy").completer = \
ChoicesCompleter(())
argcomplete.autocomplete(parser)
args = parser.parse_args()
if not os.path.isfile(args.file):
parser.error(args.file + " not found")
add_server(args.file, args.id, args.username)
add_server(args.file, args.id + "-release", args.username)
def get_userdata():
"""Get userdata defined for an instance into a file
"""
parser = get_parser()
parser.add_argument("file", help="File to write userdata into").completer =\
FilesCompleter()
argcomplete.autocomplete(parser)
args = parser.parse_args()
dirname = os.path.dirname(args.file)
if dirname:
if os.path.isfile(dirname):
parser.error(dirname + " exists and is a file")
elif not os.path.isdir(dirname):
os.makedirs(dirname)
cf_utils.get_userdata(args.file)
return
def get_account_id():
"""Get current account id. Either from instance metadata or current cli
configuration.
"""
parser = get_parser()
parser.parse_args()
print(cf_utils.resolve_account())
def colorprint(data, output_format="yaml"):
""" Colorized print for either a yaml or a json document given as argument
"""
lexer = lexers.get_lexer_by_name(output_format)
formatter = formatters.get_formatter_by_name("256")
formatter.__init__(style=get_style_by_name('emacs'))
colored = highlight(str(data, 'UTF-8'), lexer, formatter)
sys.stdout.write(colored)
def yaml_to_json():
"""Convert Nitor CloudFormation yaml to CloudFormation json with some
    preprocessing
"""
parser = get_parser()
parser.add_argument("--colorize", "-c", help="Colorize output", action="store_true")
parser.add_argument("--merge", "-m", help="Merge other yaml files to the main file", nargs="*")
parser.add_argument("--small", "-s", help="Compact representration of json", action="store_true")
parser.add_argument("file", help="File to parse").completer = FilesCompleter()
argcomplete.autocomplete(parser)
args = parser.parse_args()
if not os.path.isfile(args.file):
parser.error(args.file + " not found")
doc = aws_infra_util.yaml_to_dict(args.file, merge=args.merge)
if args.small:
dump = lambda out_doc: json.dumps(out_doc)
else:
dump = lambda out_doc: json.dumps(out_doc, indent=2)
if args.colorize:
colorprint(dump(doc), output_format="json")
else:
print(dump(doc))
def yaml_to_yaml():
""" Do ndt preprocessing for a yaml file
"""
parser = get_parser()
parser.add_argument("--colorize", "-c", help="Colorize output", action="store_true")
parser.add_argument("file", help="File to parse").completer = FilesCompleter()
argcomplete.autocomplete(parser)
args = parser.parse_args()
if not os.path.isfile(args.file):
parser.error(args.file + " not found")
doc = aws_infra_util.yaml_to_yaml(args.file)
if args.colorize:
colorprint(doc)
else:
print(doc)
def json_to_yaml():
"""Convert CloudFormation json to an approximation of a Nitor CloudFormation
    yaml with, for example, scripts externalized
"""
parser = get_parser()
parser.add_argument("--colorize", "-c", help="Colorize output",
action="store_true")
parser.add_argument("file", help="File to parse").completer = FilesCompleter()
argcomplete.autocomplete(parser)
args = parser.parse_args()
if not os.path.isfile(args.file):
parser.error(args.file + " not found")
doc = aws_infra_util.json_to_yaml(args.file)
if args.colorize:
colorprint(doc)
else:
print(doc)
def read_and_follow():
"""Read and print a file and keep following the end for new data
"""
parser = get_parser()
parser.add_argument("file", help="File to follow").completer = FilesCompleter()
argcomplete.autocomplete(parser)
args = parser.parse_args()
if not os.path.isfile(args.file):
parser.error(args.file + " not found")
cf_utils.read_and_follow(args.file, sys.stdout.write)
def logs_to_cloudwatch():
"""Read a file and send rows to cloudwatch and keep following the end for new data.
    The log group will be the stack name that created the instance and the logstream
will be the instance id and filename.
"""
parser = get_parser()
parser.add_argument("file", help="File to follow").completer = FilesCompleter()
argcomplete.autocomplete(parser)
args = parser.parse_args()
if not os.path.isfile(args.file):
parser.error(args.file + " not found")
cf_utils.send_logs_to_cloudwatch(args.file)
def signal_cf_status():
"""Signal CloudFormation status to a logical resource in CloudFormation
that is either given on the command line or resolved from CloudFormation
tags
"""
parser = get_parser()
parser.add_argument("status",
help="Status to indicate: SUCCESS | FAILURE").completer\
= ChoicesCompleter(("SUCCESS", "FAILURE"))
parser.add_argument("-r", "--resource", help="Logical resource name to " +
"signal. Looked up from " +
"cloudformation tags by " +
"default")
argcomplete.autocomplete(parser)
args = parser.parse_args()
if args.status != "SUCCESS" and args.status != "FAILURE":
parser.error("Status needs to be SUCCESS or FAILURE")
cf_utils.signal_status(args.status, resource_name=args.resource)
def associate_eip():
"""Associate an Elastic IP for the instance that this script runs on
"""
parser = get_parser()
parser.add_argument("-i", "--ip", help="Elastic IP to allocate - default" +
" is to get paramEip from the stack" +
" that created this instance")
parser.add_argument("-a", "--allocationid", help="Elastic IP allocation " +
"id to allocate - " +
"default is to get " +
"paramEipAllocationId " +
"from the stack " +
"that created this instance")
parser.add_argument("-e", "--eipparam", help="Parameter to look up for " +
"Elastic IP in the stack - " +
"default is paramEip",
default="paramEip")
parser.add_argument("-p", "--allocationidparam", help="Parameter to look" +
" up for Elastic " +
"IP Allocation ID " +
"in the stack - " +
"default is " +
"paramEipAllocatio" +
"nId",
default="paramEipAllocationId")
argcomplete.autocomplete(parser)
args = parser.parse_args()
cf_utils.associate_eip(eip=args.ip, allocation_id=args.allocationid,
eip_param=args.eipparam,
allocation_id_param=args.allocationidparam)
def instance_id():
""" Get id for instance
"""
parser = get_parser()
argcomplete.autocomplete(parser)
parser.parse_args()
if is_ec2():
info = InstanceInfo()
print(info.instance_id())
else:
sys.exit(1)
def ec2_region():
""" Get default region - the region of the instance if run in an EC2 instance
"""
parser = get_parser()
argcomplete.autocomplete(parser)
parser.parse_args()
print(region())
def tag():
""" Get the value of a tag for an ec2 instance
"""
parser = get_parser()
parser.add_argument("name", help="The name of the tag to get")
args = parser.parse_args()
argcomplete.autocomplete(parser)
if is_ec2():
info = InstanceInfo()
value = info.tag(args.name)
if value is not None:
print(value)
else:
sys.exit("Tag " + args.name + " not found")
else:
parser.error("Only makes sense on an EC2 instance")
def stack_name():
""" Get name of the stack that created this instance
"""
parser = get_parser()
argcomplete.autocomplete(parser)
parser.parse_args()
if is_ec2():
info = InstanceInfo()
print(info.stack_name())
else:
parser.error("Only makes sense on an EC2 instance cretated from a CF stack")
def stack_id():
""" Get id of the stack the creted this instance
"""
parser = get_parser()
argcomplete.autocomplete(parser)
parser.parse_args()
if is_ec2():
info = InstanceInfo()
print(info.stack_id())
else:
parser.error("Only makes sense on an EC2 instance cretated from a CF stack")
def logical_id():
""" Get the logical id that is expecting a signal from this instance
"""
parser = get_parser()
argcomplete.autocomplete(parser)
parser.parse_args()
if is_ec2():
info = InstanceInfo()
print(info.logical_id())
else:
parser.error("Only makes sense on an EC2 instance cretated from a CF stack")
def cf_region():
""" Get region of the stack that created this instance
"""
parser = get_parser()
argcomplete.autocomplete(parser)
parser.parse_args()
if is_ec2():
info = InstanceInfo()
print(info.stack_id().split(":")[3])
else:
parser.error("Only makes sense on an EC2 instance cretated from a CF stack")
def update_stack():
""" Create or update existing CloudFormation stack
"""
parser = argparse.ArgumentParser(description="Create or update existing " +
"CloudFormation stack")
parser.add_argument("stack_name", help="Name of the stack to create or " +
"update")
parser.add_argument("yaml_template", help="Yaml template to pre-process " +
"and use for creation")
parser.add_argument("region", help="The region to deploy the stack to")
parser.add_argument("-d", "--dry-run", action="store_true",
help="Do not actually deploy anything, but just " +
"assemble the json and associated parameters")
args = parser.parse_args()
if not os.path.isfile(args.yaml_template):
parser.error(args.yaml_template + " not found")
cf_deploy.deploy(args.stack_name, args.yaml_template, args.region,
args.dry_run)
return
def delete_stack():
"""Delete an existing CloudFormation stack
"""
parser = get_parser()
parser.add_argument("stack_name", help="Name of the stack to delete")
parser.add_argument("region", help="The region to delete the stack from")
args = parser.parse_args()
cf_deploy.delete(args.stack_name, args.region)
return
def tail_stack_logs():
"""Tail logs from the log group of a cloudformation stack
"""
parser = get_parser()
parser.add_argument("stack_name", help="Name of the stack to watch logs " +
"for")
parser.add_argument("-s", "--start", help="Start time in seconds since " +
"epoc")
argcomplete.autocomplete(parser)
args = parser.parse_args()
cwlogs = CloudWatchLogsThread(args.stack_name, start_time=args.start)
cwlogs.start()
cfevents = CloudFormationEvents(args.stack_name, start_time=args.start)
cfevents.start()
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
print('Closing...')
cwlogs.stop()
cfevents.stop()
return
def get_logs():
"""Get logs from multiple CloudWatch log groups and possibly filter them.
"""
parser = get_parser()
parser.add_argument("log_group_pattern", help="Regular expression to filter log groups with")
parser.add_argument("-f", "--filter", help="CloudWatch filter pattern")
parser.add_argument("-s", "--start", help="Start time (x m|h|d|w ago | now | <seconds since epoc>)", nargs="+")
parser.add_argument("-e", "--end", help="End time (x m|h|d|w ago | now | <seconds since epoc>)", nargs="+")
parser.add_argument("-o", "--order", help="Best effort ordering of log entries", action="store_true")
parser.usage = "ndt logs log_group_pattern [-h] [-f FILTER] [-s START [START ...]] [-e END [END ...]] [-o]"
argcomplete.autocomplete(parser)
args = parser.parse_args()
cwlogs_groups = CloudWatchLogsGroups(
log_group_filter=args.log_group_pattern,
log_filter=args.filter,
start_time=' '.join(args.start) if args.start else None,
end_time=' '.join(args.end) if args.end else None,
sort=args.order
)
cwlogs_groups.get_logs()
def resolve_include():
"""Find a file from the first of the defined include paths
"""
parser = get_parser()
parser.add_argument("file", help="The file to find")
argcomplete.autocomplete(parser)
args = parser.parse_args()
inc_file = find_include(args.file)
if not inc_file:
parser.error("Include " + args.file + " not found on include paths " +
str(include_dirs))
print(inc_file)
def resolve_all_includes():
"""Find a file from the first of the defined include paths
"""
parser = get_parser()
parser.add_argument("pattern", help="The file pattern to find")
argcomplete.autocomplete(parser)
args = parser.parse_args()
inc_file = find_all_includes(args.pattern)
if not inc_file:
parser.error("Include " + args.pattern + " not found on include paths " +
str(include_dirs))
for next_file in inc_file:
print(next_file)
def assume_role():
"""Assume a defined role. Prints out environment variables
to be eval'd to current context for use:
eval $(ndt assume-role 'arn:aws:iam::43243246645:role/DeployRole')
"""
parser = get_parser()
parser.add_argument("role_arn", help="The ARN of the role to assume")
parser.add_argument("-t", "--mfa-token", metavar="TOKEN_NAME",
help="Name of MFA token to use", required=False)
parser.add_argument("-d", "--duration", help="Duration for the session in minutes",
default="60", type=int, required=False)
parser.add_argument("-p", "--profile", help="Profile to edit in ~/.aws/credentials " + \
"to make role persist in that file for " + \
"the duration of the session.", required=False)
argcomplete.autocomplete(parser)
args = parser.parse_args()
creds = cf_utils.assume_role(args.role_arn, mfa_token_name=args.mfa_token,
duration_minutes=args.duration)
if args.profile:
update_profile(args.profile, creds)
else:
print("AWS_ROLE_ARN=\"" + args.role_arn + "\"")
print("AWS_ACCESS_KEY_ID=\"" + creds['AccessKeyId'] + "\"")
print("AWS_SECRET_ACCESS_KEY=\"" + creds['SecretAccessKey'] + "\"")
print("AWS_SESSION_TOKEN=\"" + creds['SessionToken'] + "\"")
print("AWS_SESSION_EXPIRATION=\"" + creds['Expiration'].strftime("%a, %d %b %Y %H:%M:%S +0000") + "\"")
print("export AWS_ROLE_ARN AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN AWS_SESSION_EXPIRATION")
def get_parameter():
"""Get a parameter value from the stack
"""
parser = get_parser()
parser.add_argument("parameter", help="The name of the parameter to print")
argcomplete.autocomplete(parser)
args = parser.parse_args()
info = InstanceInfo()
print(info.stack_data(args.parameter))
def volume_from_snapshot():
""" Create a volume from an existing snapshot and mount it on the given
path. The snapshot is identified by a tag key and value. If no tag is
found, an empty volume is created, attached, formatted and mounted.
"""
parser = get_parser()
parser.add_argument("tag_key", help="Key of the tag to find volume with")
parser.add_argument("tag_value", help="Value of the tag to find volume with")
parser.add_argument("mount_path", help="Where to mount the volume")
parser.add_argument("size_gb", nargs="?", help="Size in GB for the volum" +
"e. If different from sna" +
"pshot size, volume and " +
"filesystem are resized",
default=None, type=int)
parser.add_argument("-n", "--no_delete_on_termination",
help="Whether to skip deleting the volume on termi" +
"nation, defaults to false", action="store_true")
parser.add_argument("-c", "--copytags", nargs="*", help="Tag to copy to the volume from instance. Multiple values allowed.")
parser.add_argument("-t", "--tags", nargs="*", help="Tag to add to the volume in the format name=value. Multiple values allowed.")
argcomplete.autocomplete(parser)
args = parser.parse_args()
tags = {}
if args.tags:
for tag in args.tags:
try:
key, value = tag.split('=', 1)
tags[key] = value
except ValueError:
parser.error("Invalid tag/value input: " + tag)
if is_ec2():
volumes.volume_from_snapshot(args.tag_key, args.tag_value, args.mount_path,
size_gb=args.size_gb,
del_on_termination=not args.no_delete_on_termination,
copytags=args.copytags, tags=tags)
else:
parser.error("Only makes sense on an EC2 instance")
def snapshot_from_volume():
""" Create a snapshot of a volume identified by it's mount path
"""
parser = get_parser()
parser.add_argument("-w", "--wait", help="Wait for the snapshot to finish" +
" before returning",
action="store_true")
parser.add_argument("tag_key", help="Key of the tag to find volume with")
parser.add_argument("tag_value", help="Value of the tag to find volume with")
parser.add_argument("mount_path", help="Where to mount the volume")
parser.add_argument("-c", "--copytags", nargs="*", help="Tag to copy to the snapshot from instance. Multiple values allowed.")
parser.add_argument("-t", "--tags", nargs="*", help="Tag to add to the snapshot in the format name=value. Multiple values allowed.")
argcomplete.autocomplete(parser)
args = parser.parse_args()
tags = {}
if args.tags:
for tag in args.tags:
try:
key, value = tag.split('=', 1)
tags[key] = value
except ValueError:
parser.error("Invalid tag/value input: " + tag)
if is_ec2():
print(volumes.create_snapshot(args.tag_key, args.tag_value,
args.mount_path, wait=args.wait, tags=tags, copytags=args.copytags))
else:
parser.error("Only makes sense on an EC2 instance")
def detach_volume():
""" Create a snapshot of a volume identified by it's mount path
"""
parser = get_parser()
parser.add_argument("mount_path", help="Where to mount the volume")
argcomplete.autocomplete(parser)
args = parser.parse_args()
if is_ec2():
volumes.detach_volume(args.mount_path)
else:
parser.error("Only makes sense on an EC2 instance")
def clean_snapshots():
"""Clean snapshots that are older than a number of days (30 by default) and
    have one of the specified tag values
"""
parser = get_parser()
parser.add_argument("-r", "--region", help="The region to delete " +
"snapshots from. Can also be " +
"set with env variable " +
"AWS_DEFAULT_REGION or is " +
"gotten from instance " +
"metadata as a last resort")
parser.add_argument("-d", "--days", help="The number of days that is the" +
"minimum age for snapshots to " +
"be deleted", type=int, default=30)
parser.add_argument("tags", help="The tag values to select deleted " +
"snapshots", nargs="+")
argcomplete.autocomplete(parser)
args = parser.parse_args()
if args.region:
os.environ['AWS_DEFAULT_REGION'] = args.region
volumes.clean_snapshots(args.days, args.tags)
def setup_cli():
"""Setup the command line environment to define an aws cli profile with
the given name and credentials. If an identically named profile exists,
it will not be overwritten.
"""
parser = get_parser()
parser.add_argument("-n", "--name", help="Name for the profile to create")
parser.add_argument("-k", "--key-id", help="Key id for the profile")
parser.add_argument("-s", "--secret", help="Secret to set for the profile")
parser.add_argument("-r", "--region", help="Default region for the profile")
argcomplete.autocomplete(parser)
args = parser.parse_args()
cf_bootstrap.setup_cli(**vars(args))
def show_stack_params_and_outputs():
""" Show stack parameters and outputs as a single json documents
"""
parser = get_parser()
parser.add_argument("-r", "--region", help="Region for the stack to show",
default=region()).completer = ChoicesCompleter(regions())
parser.add_argument("-p", "--parameter", help="Name of paremeter if only" +
" one parameter required")
parser.add_argument("stack_name", help="The stack name to show").completer = \
ChoicesCompleter(stacks())
argcomplete.autocomplete(parser)
args = parser.parse_args()
resp = stack_params_and_outputs(args.region, args.stack_name)
if args.parameter:
if args.parameter in resp:
print(resp[args.parameter])
else:
parser.error("Parameter " + args.parameter + " not found")
else:
print(json.dumps(resp, indent=2))
def cli_get_images():
""" Gets a list of images given a bake job name
"""
parser = get_parser()
parser.add_argument("job_name", help="The job name to look for")
argcomplete.autocomplete(parser)
args = parser.parse_args()
set_region()
images = get_images(args.job_name)
for image in images:
print(image['ImageId'] + ":" + image['Name'])
def cli_promote_image():
""" Promotes an image for use in another branch
"""
parser = get_parser()
parser.add_argument("image_id", help="The image to promote")
parser.add_argument("target_job", help="The job name to promote the image to")
argcomplete.autocomplete(parser)
args = parser.parse_args()
if ":" in args.image_id:
args.image_id = args.image_id.split(":")[0]
promote_image(args.image_id, args.target_job)
def cli_share_to_another_region():
""" Shares an image to another region for potentially another account
"""
parser = get_parser()
parser.add_argument("ami_id", help="The ami to share")
parser.add_argument("to_region", help="The region to share to").completer =\
ChoicesCompleter(regions())
parser.add_argument("ami_name", help="The name for the ami")
parser.add_argument("account_id", nargs="+", help="The account ids to sh" +
"are ami to")
argcomplete.autocomplete(parser)
args = parser.parse_args()
share_to_another_region(args.ami_id, args.to_region, args.ami_name,
args.account_id)
def cli_register_private_dns():
""" Register local private IP in route53 hosted zone usually for internal
use.
"""
parser = get_parser()
parser.add_argument("dns_name", help="The name to update in route 53")
parser.add_argument("hosted_zone", help="The name of the hosted zone to update")
argcomplete.autocomplete(parser)
args = parser.parse_args()
register_private_dns(args.dns_name, args.hosted_zone)
def cli_interpolate_file():
""" Replace placeholders in file with parameter values from stack and
optionally from vault
"""
parser = get_parser()
parser.add_argument("-s", "--stack", help="Stack name for values. " +
"Automatically resolved on ec2" +
" instances")
parser.add_argument("-v", "--vault", help="Use vault values as well." +
"Vault resovled from env " +
"variables or default is used",
action="store_true")
parser.add_argument("-o", "--output", help="Output file")
parser.add_argument("-e", "--encoding", help="Encoding to use for the " +
"file. Defaults to utf-8",
default='utf-8')
parser.add_argument("file", help="File to interpolate").completer = \
FilesCompleter()
argcomplete.autocomplete(parser)
args = parser.parse_args()
interpolate_file(args.file, stack_name=args.stack, use_vault=args.vault,
destination=args.output, encoding=args.encoding)
def cli_ecr_ensure_repo():
""" Ensure that an ECR repository exists and get the uri and login token for
it """
parser = get_parser()
parser.add_argument("name", help="The name of the ecr repository to verify")
argcomplete.autocomplete(parser)
args = parser.parse_args()
ensure_repo(args.name)
def cli_ecr_repo_uri():
""" Get the repo uri for a named docker """
parser = get_parser()
parser.add_argument("name", help="The name of the ecr repository")
argcomplete.autocomplete(parser)
args = parser.parse_args()
uri = repo_uri(args.name)
if not uri:
parser.error("Did not find uri for repo '" + args.name + "'")
else:
print(uri)
def cli_upsert_cloudfront_records():
""" Upsert Route53 records for all aliases of a CloudFront distribution """
parser = get_parser()
stack_select = parser.add_mutually_exclusive_group(required=True)
stack_select.add_argument("-i", "--distribution_id", help="Id for the " +
"distribution to " +
"upsert").completer = \
ChoicesCompleter(distributions())
stack_select.add_argument("-c", "--distribution_comment", help="Comment for the" +
" distribution " +
"to upsert").completer = \
ChoicesCompleter(distribution_comments())
parser.add_argument("-w", "--wait", help="Wait for request to sync", action="store_true")
argcomplete.autocomplete(parser)
args = parser.parse_args()
upsert_cloudfront_records(args)
def cli_mfa_add_token():
""" Adds an MFA token to be used with role assumption.
Tokens will be saved in a .ndt subdirectory in the user's home directory.
If a token with the same name already exists, it will not be overwritten."""
parser = get_parser()
parser.add_argument("token_name",
help="Name for the token. Use this to refer to the token later with " +
"the assume-role command.")
parser.add_argument("-i", "--interactive", help="Ask for token details interactively.",
action="store_true")
parser.add_argument("-a", "--token_arn", help="ARN identifier for the token.")
parser.add_argument("-s", "--token_secret", help="Token secret.")
parser.add_argument("-f", "--force", help="Force an overwrite if the token already exists.",
action="store_true")
argcomplete.autocomplete(parser)
args = parser.parse_args()
if args.interactive:
        args.token_secret = input("Enter token secret: ")
code_1 = mfa_generate_code_with_secret(args.token_secret)
print("First sync code: " + code_1)
print("Waiting to generate second sync code. This could take 30 seconds...")
code_2 = mfa_generate_code_with_secret(args.token_secret)
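        # TOTP codes change every 30 seconds, so poll until a new time window
        # produces a code different from the first one.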
while code_1 == code_2:
time.sleep(5)
code_2 = mfa_generate_code_with_secret(args.token_secret)
print("Second sync code: " + code_2)
        args.token_arn = input("Enter token ARN: ")
elif args.token_arn is None or args.token_secret is None:
parser.error("Both token_arn and token_secret are required when not adding interactively.")
try:
mfa_add_token(args)
except ValueError as error:
parser.error(error)
def cli_mfa_delete_token():
""" Deletes an MFA token file from the .ndt subdirectory in the user's
home directory """
parser = get_parser()
parser.add_argument("token_name",
help="Name of the token to delete.").completer = \
ChoicesCompleter(list_mfa_tokens())
argcomplete.autocomplete(parser)
args = parser.parse_args()
mfa_delete_token(args.token_name)
def cli_mfa_code():
""" Generates a TOTP code using an MFA token. """
parser = get_parser()
parser.add_argument("token_name",
help="Name of the token to use.").completer = \
ChoicesCompleter(list_mfa_tokens())
argcomplete.autocomplete(parser)
args = parser.parse_args()
print(mfa_generate_code(args.token_name))
def cli_mfa_to_qrcode():
""" Generates a QR code to import a token to other devices. """
parser = get_parser()
parser.add_argument("token_name",
help="Name of the token to use.").completer = \
ChoicesCompleter(list_mfa_tokens())
argcomplete.autocomplete(parser)
args = parser.parse_args()
mfa_to_qrcode(args.token_name)
def cli_mfa_backup_tokens():
""" Encrypt or decrypt a backup JSON structure of tokens.
To output an encrypted backup, provide an encryption secret.
To decrypt an existing backup, use --decrypt <file>.
"""
parser = get_parser()
parser.add_argument("backup_secret",
help="Secret to use for encrypting or decrypts the backup.")
parser.add_argument("-d",
"--decrypt",
help="Outputs a decrypted token backup read from given file.",
nargs=1,
metavar="FILE")
argcomplete.autocomplete(parser)
args = parser.parse_args()
if args.decrypt:
print(mfa_decrypt_backup_tokens(args.backup_secret, args.decrypt[0]))
else:
print(mfa_backup_tokens(args.backup_secret))
def cli_create_account():
""" Creates a subaccount. """
parser = get_parser()
parser.add_argument("email", help="Email for account root")
parser.add_argument("account_name", help="Organization unique account name")
parser.add_argument("-d", "--deny-billing-access", action="store_true")
parser.add_argument("-o", "--organization-role-name", help="Role name for " +
"admin access from" +
" parent account",
default="OrganizationAccountAccessRole")
parser.add_argument("-r", "--trust-role-name", help="Role name for admin " +
"access from parent account",
default="TrustedAccountAccessRole")
parser.add_argument("-a", "--trusted-accounts", nargs="*",
help="Account to trust with user management").completer = \
ChoicesCompleter(list_created_accounts())
parser.add_argument("-t", "--mfa-token", metavar="TOKEN_NAME",
help="Name of MFA token to use", required=False)
argcomplete.autocomplete(parser)
args = parser.parse_args()
create_account(args.email, args.account_name, role_name=args.organization_role_name,
trust_role=args.trust_role_name, access_to_billing=not args.deny_billing_access,
trusted_accounts=args.trusted_accounts, mfa_token=args.mfa_token)
def cli_load_parameters():
""" Load parameters from infra*.properties files in the order:
infra.properties,
infra-[branch].properties,
[component]/infra.properties,
[component]/infra-[branch].properties,
[component]/[subcomponent-type]-[subcomponent]/infra.properties,
[component]/[subcomponent-type]-[subcomponent]/infra-[branch].properties
Last parameter defined overwrites ones defined before in the files. Supports parameter expansion
and bash -like transformations. Namely:
${PARAM##prefix} # strip prefix greedy
${PARAM%%suffix} # strip suffix greedy
${PARAM#prefix} # strip prefix not greedy
${PARAM%suffix} # strip suffix not greedy
${PARAM:-default} # default if empty
${PARAM:4:2} # start:len
${PARAM/substr/replace}
${PARAM^} # upper initial
${PARAM,} # lower initial
${PARAM^^} # upper
${PARAM,,} # lower
Comment lines start with '#'
Lines can be continued by adding '\' at the end
See https://www.tldp.org/LDP/Bash-Beginners-Guide/html/sect_10_03.html
(arrays not supported)
"""
parser = get_parser(formatter=argparse.RawDescriptionHelpFormatter)
parser.add_argument("component", nargs="?", help="Compenent to descend into").completer = \
ChoicesCompleter([c.name for c in Project().get_components()])
parser.add_argument("--branch", "-b", help="Branch to get active parameters for").completer = \
ChoicesCompleter(Git().get_branches())
parser.add_argument("--resolve-images", "-r", action="store_true", help="Also resolve subcomponent AMI IDs and docker repo urls")
subcomponent_group = parser.add_mutually_exclusive_group()
subcomponent_group.add_argument("--stack", "-s", help="CloudFormation subcomponent to descent into").completer = \
lambda prefix, parsed_args, **kwargs: component_typed_subcomponents("stack", prefix, parsed_args, **kwargs)
subcomponent_group.add_argument("--serverless", "-l", help="Serverless subcomponent to descent into").completer = \
lambda prefix, parsed_args, **kwargs: component_typed_subcomponents("serverless", prefix, parsed_args, **kwargs)
subcomponent_group.add_argument("--docker", "-d", help="Docker image subcomponent to descent into").completer = \
lambda prefix, parsed_args, **kwargs: component_typed_subcomponents("docker", prefix, parsed_args, **kwargs)
subcomponent_group.add_argument("--image", "-i", const="", nargs="?", help="AMI image subcomponent to descent into").completer = \
lambda prefix, parsed_args, **kwargs: component_typed_subcomponents("image", prefix, parsed_args, **kwargs)
subcomponent_group.add_argument("--cdk", "-c", help="CDK subcomponent to descent into").completer = \
lambda prefix, parsed_args, **kwargs: component_typed_subcomponents("cdk", prefix, parsed_args, **kwargs)
subcomponent_group.add_argument("--terraform", "-t", help="Terraform subcomponent to descent into").completer = \
lambda prefix, parsed_args, **kwargs: component_typed_subcomponents("terraform", prefix, parsed_args, **kwargs)
format_group = parser.add_mutually_exclusive_group()
format_group.add_argument("--json", "-j", action="store_true", help="JSON format output (default)")
format_group.add_argument("--yaml", "-y", action="store_true", help="YAML format output")
format_group.add_argument("--properties", "-p", action="store_true", help="properties file format output")
format_group.add_argument("--export-statements", "-e", action="store_true",
help="Output as eval-able export statements")
argcomplete.autocomplete(parser)
args = parser.parse_args()
transform = json.dumps
if args.export_statements:
transform = map_to_exports
if args.properties:
transform = map_to_properties
if args.yaml:
transform = yaml.dump
del args.export_statements
del args.yaml
del args.json
del args.properties
if (args.stack or args.serverless or args.docker or not isinstance(args.image, NoneType)) \
and not args.component:
parser.error("image, stack, doker or serverless do not make sense without component")
print(transform(load_parameters(**vars(args))), end="")
def component_typed_subcomponents(sc_type, prefix, parsed_args, **kwargs):
p_args = {}
if parsed_args.branch:
p_args["branch"] = parsed_args.branch
if parsed_args.component:
return [sc.name for sc in Project(**p_args).get_component(parsed_args.component).get_subcomponents() if sc.type == sc_type and sc.name.startswith(prefix)]
else:
return [sc.name for sc in Project(**p_args).get_all_subcomponents() if sc.type == sc_type]
def map_to_exports(map):
""" Prints the map as eval-able set of environment variables. Keys
will be cleaned of all non-word letters and values will be escaped so
that they will be exported as literal values."""
ret = ""
keys = []
for key, val in list(map.items()):
key = re.sub("[^a-zA-Z0-9_]", "", key)
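        # Escape embedded single quotes ('"'"') so the value survives being
        # wrapped in single quotes for shell eval.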
ret += key + "='" + val.replace("'", "'\"'\"'") + "'" + os.linesep
keys.append(key)
ret += "export " + " ".join(keys) + os.linesep
return ret
def map_to_properties(map):
""" Prints the map as loadable set of java properties. Keys
will be cleaned of all non-word letters."""
ret = ""
for key, val in list(map.items()):
key = re.sub("[^a-zA-Z0-9_]", "", key)
ret += key + "=" + val + os.linesep
return ret
def wait_for_metadata():
""" Waits for metadata service to be available. All errors are ignored until
time expires or a socket can be established to the metadata service """
parser = get_parser()
parser.add_argument('--timeout', '-t', type=int, help="Maximum time to wait in seconds for the metadata service to be available", default=300)
argcomplete.autocomplete(parser)
args = parser.parse_args()
start = datetime.utcnow().replace(tzinfo=tzutc())
cutoff = start + timedelta(seconds=args.timeout)
timeout = args.timeout
connected = False
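    # 169.254.169.254 is the link-local EC2 instance metadata endpoint; keep
    # retrying until the socket opens or the timeout expires.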
while not connected:
try:
connected = cf_utils.wait_net_service("169.254.169.254", 80, timeout)
except:
pass
if datetime.utcnow().replace(tzinfo=tzutc()) >= cutoff:
print("Timed out waiting for metadata service")
sys.exit(1)
time.sleep(1)
timeout = max(1, args.timeout - (datetime.utcnow().replace(tzinfo=tzutc()) - start).total_seconds())
def cli_assumed_role_name():
""" Read the name of the assumed role if currently defined """
parser = get_parser()
argcomplete.autocomplete(parser)
_ = parser.parse_args()
print(assumed_role_name())
def cli_list_jobs():
""" Prints a line for every runnable job in this git repository, in all branches and
    optionally exports the properties for each under '$root/job-properties/'"""
parser = get_parser()
parser.add_argument("-e", "--export-job-properties", action="store_true",
help="Set if you want the properties of all jobs into files under job-properties/")
parser.add_argument("-j", "--json", action="store_true", help="Print in json format. Optionally " \
"exported parameters will be in the json document")
parser.add_argument("-b", "--branch", help="The branch to process. Default is to process all branches").completer = \
ChoicesCompleter(Git().get_branches())
parser.add_argument("-c", "--component", help="Component to process. Default is to process all components").completer = \
branch_components
argcomplete.autocomplete(parser)
args = parser.parse_args()
ret = list_jobs(**vars(args))
if args.json:
print(json.dumps(ret, indent=2))
else:
print("\n".join(ret))
def branch_components(prefix, parsed_args, **kwargs):
if parsed_args.branch:
return [c.name for c in Project(branch=parsed_args.branch).get_components()]
else:
return [c.name for c in Project().get_components()]
def cli_list_components():
""" Prints the components in a branch, by default the current branch """
parser = get_parser()
parser.add_argument("-j", "--json", action="store_true", help="Print in json format.")
parser.add_argument("-b", "--branch", help="The branch to get components from. Default is to process current branch").completer = \
ChoicesCompleter(Git().get_branches())
argcomplete.autocomplete(parser)
args = parser.parse_args()
ret = list_components(**vars(args))
if args.json:
print(json.dumps(ret, indent=2))
else:
print("\n".join(ret))
| apache-2.0 | 3,418,491,037,571,517,000 | 41.517431 | 162 | 0.610491 | false | 4.062681 | false | false | false |
jeliasherrero/SeminarioTheano2015 | convTheano.py | 1 | 7606 | """
Design of LeNet (a convolutional network) to recognize the digits in the matlab file digits.mat
"""
import time
import scipy.io as io
import numpy
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
# Import the classes already defined for the MLP
from mlp import CapaOculta, LogisticRegression
# Define the LeNet convolutional + pooling layer
class LeNetConvPoolLayer(object):
"""Pool Layer of a convolutional network """
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
"""
Allocate a LeNetConvPoolLayer with shared variable internal parameters.
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dtensor4
:param input: symbolic image tensor, of shape image_shape
:type filter_shape: tuple or list of length 4
:param filter_shape: (number of filters, num input feature maps,
filter height, filter width)
:type image_shape: tuple or list of length 4
:param image_shape: (batch size, num input feature maps,
image height, image width)
:type poolsize: tuple or list of length 2
:param poolsize: the downsampling (pooling) factor (#rows, #cols)
"""
assert image_shape[1] == filter_shape[1]
self.input = input
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = numpy.prod(filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
numpy.prod(poolsize))
# initialize weights with random weights
W_bound = numpy.sqrt(6. / (fan_in + fan_out))
self.W = theano.shared(
numpy.asarray(
rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=theano.config.floatX
),
borrow=True
)
# the bias is a 1D tensor -- one bias per output feature map
b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = theano.shared(value=b_values, borrow=True)
# convolve input feature maps with filters
conv_out = conv.conv2d(
input=input,
filters=self.W,
filter_shape=filter_shape,
image_shape=image_shape
)
# downsample each feature map individually, using maxpooling
pooled_out = downsample.max_pool_2d(
input=conv_out,
ds=poolsize,
ignore_border=True
)
# add the bias term. Since the bias is a vector (1D array), we first
# reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
# thus be broadcasted across mini-batches and feature map
# width & height
self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
# store parameters of this layer
self.params = [self.W, self.b]
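# Training hyperparameters: nkerns gives the number of feature maps in each
# convolutional layer and batch_size the number of examples per minibatch.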
learning_rate=0.1
n_epochs=200
dataset='digits.mat'
nkerns=[10, 25]
batch_size=5000
rng = numpy.random.RandomState(23455)
# Load the data
print '... loading data'
data=io.loadmat(dataset,squeeze_me=True)
dataIn=data['X']
dataOut = data['y']
train_set_x = theano.shared(numpy.asarray(dataIn,
dtype=theano.config.floatX),borrow=True)
train_set_y = T.cast(theano.shared(numpy.asarray(dataOut,
dtype=theano.config.floatX),borrow=True),'int32')
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
index = T.iscalar() # index to a [mini]batch
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# Reshape matrix of rasterized images of shape (batch_size, 20 * 20)
# to a 4D tensor, compatible with our LeNetConvPoolLayer
# (20, 20) is the size of the digits.mat images.
layer0_input = x.reshape((batch_size, 1, 20, 20))
# Construct the first convolutional pooling layer:
# filtering reduces the image size to (20-5+1, 20-5+1) = (16, 16)
# pooling with (1, 1) leaves the size unchanged at (16, 16)
# 4D output tensor is thus of shape (batch_size, nkerns[0], 16, 16)
layer0 = LeNetConvPoolLayer(
rng,
input=layer0_input,
image_shape=(batch_size, 1, 20, 20),
filter_shape=(nkerns[0], 1, 5, 5),
poolsize=(1, 1)
)
# Construct the second convolutional pooling layer
# filtering reduces the image size to (16-3+1, 16-3+1) = (14, 14)
# maxpooling reduces this further to (14/2, 14/2) = (7, 7)
# 4D output tensor is thus of shape (batch_size, nkerns[1], 7, 7)
layer1 = LeNetConvPoolLayer(
rng,
input=layer0.output,
image_shape=(batch_size, nkerns[0], 16, 16),
filter_shape=(nkerns[1], nkerns[0], 3, 3),
poolsize=(2, 2)
)
# the HiddenLayer being fully-connected, it operates on 2D matrices of
# shape (batch_size, num_pixels) (i.e matrix of rasterized images).
# This will generate a matrix of shape (batch_size, nkerns[1] * 7 * 7),
# or (5000, 25 * 7 * 7) = (5000, 1225) with the values used here.
layer2_input = layer1.output.flatten(2)
# construct a fully-connected sigmoidal layer
layer2 = CapaOculta(
rng,
input=layer2_input,
n_in=nkerns[1] * 7 * 7,
n_out=500,
activation=T.tanh
)
# classify the values of the fully-connected sigmoidal layer
layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
cost = layer3.negative_log_likelihood(y)
# create a list of all model parameters to be fit by gradient descent
params = layer3.params + layer2.params + layer1.params + layer0.params
# create a list of gradients for all model parameters
grads = T.grad(cost, params)
# train_model is a function that updates the model parameters by
# SGD Since this model has many parameters, it would be tedious to
# manually create an update rule for each model parameter. We thus
# create the updates list by automatically looping over all
# (params[i], grads[i]) pairs.
updates = [
(param_i, param_i - learning_rate * grad_i)
for param_i, grad_i in zip(params, grads)
]
print train_set_x.dtype
print index.dtype
print y.dtype
train_model = theano.function(
[index],
cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
print dataOut
print '... training'
start_time = time.clock()
epoch = 0
done_looping = False
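# Plain minibatch SGD over the training set for n_epochs; done_looping is
# never set, so there is no early stopping.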
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
print "Epoca: ", repr(epoch)
for minibatch_index in xrange(n_train_batches):
iter = (epoch - 1) * n_train_batches + minibatch_index
if iter % 100 == 0:
print 'training @ iter = ', iter
cost_ij = train_model(minibatch_index)
end_time = time.clock()
print "Tiempo de ejecucion es de %.2fm" % ((end_time-start_time) / 60.)
predict = theano.function(
inputs=[index],
outputs=layer3.y_pred,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size]
}
)
test = [predict(i) for i
in xrange(n_train_batches)]
real = [dataOut for i
in xrange(n_train_batches)]
print test
print real | gpl-2.0 | -3,203,256,579,341,251,600 | 30.962185 | 95 | 0.643571 | false | 3.346238 | false | false | false |
ajavadia/ScaffCC | scaffold/flatten-qasm.py | 1 | 8016 | import argparse
import re
def process_qasm(fname):
qgates = ['H','X','CNOT','Y','Z','S','T','Tdag','Sdag','Rz','PrepX','PrepZ','MeasX','MeasZ','Toffoli','Fredkin']
qgates_1 = ['H','X','Y','Z','S','T','Tdag']
qgates_1a = ['Sdag']
qgates_2 = ['CNOT']
qgates_3 = ['Toffoli','Fredkin']
qgates_4 = ['PrepX','PrepZ']
qgates_5 = ['MeasX','MeasZ']
qgates_6 = ['Rz']
qgates_7 = ['afree']
gateNames = {
'H':'H',
'X':'X',
'Y':'Y',
'Z':'Z',
'S':'S',
'T':'T',
'Sdag':'Sdag',
'Tdag':'Tdag',
'PrepX':'PrepX', #'Px',
'PrepZ':'PrepZ', #'Pz',
'MeasZ':'MeasZ', #'Mz',
'MeasX':'MeasX', #'Mx',
'Rz':'Rz',
'CNOT':'CNOT', #'CX',
'Toffoli':'Tof',
'Fredkin':'Fredkin',
'afree':'afree'
}
pattern_qbit_decl = re.compile(r"\s*\bqbit\b\s+(?P<qbit_var>\w+)\s*\[\s*(?P<array_size>\d+)\s*\]\s*;")
pattern_cbit_decl = re.compile(r"\s*\bcbit\b\s+(?P<qbit_var>\w+)\s*\[\s*(?P<array_size>\d+)\s*\]\s*;")
pattern_qg = re.compile(r"\s*((\w+|\w+\[(.*?)\])\s*\=)*\s*(?P<func_name>\w+)\s*\(\s*(?P<array_size>(.*?))\s*\)\s*;")
pattern_qbit_arg = re.compile(r"(.*?)\((.*?)\bqbit\b\s*(.*?)\)(.*?)")
pattern_meas = re.compile(r"\s*(?P<func_ret>(\w+|\w+\[(.*?)\])\s*\=)*\s*(\bqg_MeasX|qg_MeasZ\b)\s*\(\s*(?P<array_size>(.*?))\s*\)\s*;")
pattern_main = re.compile(r"\s*(\bvoid|module\b)\s+(\bmain|main1\b)\s*\((.*?)\)\s*(\{)*\s*")
pattern_comment = re.compile(r"\s*//--//--(.*?)--//--//\s*")
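    # The regexes above match qbit/cbit declarations, quantum gate calls
    # (optionally with an assigned return value), module signatures that take
    # qbit arguments, measurement calls, the main() definition and the special
    # //--//-- ... --//--// comments.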
fout_name = re.sub('\.qasmh$','_qasm.scaffold',fname)
fout = open(fout_name,'w')
fout.write('#include<stdio.h>\n')
#add instrumentation functions
for q in qgates_1:
instFnName = 'qg_'+q
fstr = 'void '+instFnName+'(char* a){ printf("' +gateNames[q] +' %s\\n",a); }\n'
fout.write(fstr)
fout.write('\n')
for q in qgates_1a: #Sdag = S^3
instFnName = 'qg_'+q
fstr = 'void '+instFnName+'(char* a){ printf("S %s\\n",a); printf("S %s\\n",a); printf("S %s\\n",a); }\n'
fout.write(fstr)
fout.write('\n')
for q in qgates_2: #CNOT => CX (target,control)
instFnName = 'qg_'+q
fstr = 'void '+instFnName+'(char* a, char* b){ printf("'+gateNames[q]+' %s,%s\\n",a,b); }\n'
fout.write(fstr)
fout.write('\n')
for q in qgates_3:
instFnName = 'qg_'+q
fstr = 'void '+instFnName+'(char* a, char* b, char* c){ printf("' +gateNames[q] +' %s,%s,%s\\n",a,b,c); }\n'
fout.write(fstr)
fout.write('\n')
for q in qgates_4: #PrepZ, PrepX
instFnName = 'qg_'+q
fstr = 'void '+instFnName+'(char* a, int i){ printf("' +gateNames[q] +' %s\\n",a); '
fout.write(fstr)
fstr = 'if(i==1){ printf("X %s\\n",a); } }\n'
fout.write(fstr)
fout.write('\n')
for q in qgates_5: #MeasX, MeasZ
instFnName = 'qg_'+q
fstr = 'void '+instFnName+'(char* a){ printf("' +gateNames[q] +' %s\\n",a); }\n'
fout.write(fstr)
fout.write('\n')
for q in qgates_6:
instFnName = 'qg_'+q
fstr = 'void '+instFnName+'(char* a, double b){ printf("' +gateNames[q] +' %s,%f\\n",a,b); }\n'
fout.write(fstr)
for q in qgates_7:
instFnName = q
fstr = 'void '+instFnName+'(char** a, int b ){ for(int i = 0; i < b; i++){ printf("' +gateNames[q] +' %s\\n",(*a)); a++; }}\n'
fout.write(fstr)
fout.write('\n')
#ignore contents until QASM Generation Pass
f = open(fname,'r')
b = 'Dummy Line'
while(b!=''):
if(b.find('QASM Generation Pass:')!=-1):
break
b = f.readline()
b = f.readline()
inMainFunc = False
setQbitDecl = []
setCbitDecl = []
while(b!=''):
if(b.find('End of QASM generation')!=-1):
break
#check for qbit declarations
m = re.match(pattern_main,b)
if(m):
inMainFunc = True
b = re.sub(r"\bvoid|module\b","int ",b)
m = re.match(pattern_qbit_decl,b)
if(m): #Matched qbit declaration
numElem = int(m.group('array_size'))
var = m.group('qbit_var')
addAlphabet=''
if(not inMainFunc):
addAlphabet='a' #add 'a' at end of ancilla declaration
subStr = "char* "+m.group('qbit_var')+'['+m.group('array_size')+'] = {'
fout.write(subStr)
for i in range(numElem-1):
varName = var+str(i)+addAlphabet
tmp = '"'+varName+'",'
if varName not in setQbitDecl:
setQbitDecl.append(varName)
fout.write(tmp)
varName = var+str(numElem-1)+addAlphabet
tmp = '"'+varName+'"'
if varName not in setQbitDecl:
setQbitDecl.append(varName)
fout.write(tmp)
fout.write('};\n')
else:
m = re.match(pattern_qg,b)
if(m): #Matched qauntum gate call
qstr = m.group('func_name')
if qstr in qgates:
rstr = 'qg_'+qstr
mystr = b.replace(qstr,rstr)
#check for Meas gates
m1 = re.match(pattern_meas,mystr)
if(m1):
retStr = m1.group('func_ret')
if(retStr):
mystr = mystr.replace(retStr,'')
fout.write(mystr)
else:
fout.write(b)
else:
#substitute qbit as char* in module definitions
m = re.match(pattern_qbit_arg,b)
if(m):
mystr = b
mystr = re.sub(r"\bqbit\b","char* ",mystr)
fout.write(mystr)
else:
m = re.match(pattern_cbit_decl,b)
if(m):
numElem = int(m.group('array_size'))
var = m.group('qbit_var')
subStr = "char* "+m.group('qbit_var')+'['+m.group('array_size')+'] = {'
fout.write(subStr)
for i in range(numElem-1):
tmp = '"'+var+str(i)+'",'
setCbitDecl.append(var+str(i))
fout.write(tmp)
tmp = '"'+var+str(numElem-1)+'"'
setCbitDecl.append(var+str(numElem-1))
fout.write(tmp)
fout.write('};\n')
else:
m = re.match(pattern_comment,b)
if(m):
subStr = 'printf("'+b.rstrip('\n')+'\\n");'
fout.write(subStr)
else:
#print 'Did not match any pattern:',b
fout.write(b)
b = f.readline()
f.close()
fout.close()
#write qbit and cbit declarations to file
fdecl = open("fdecl.out",'w')
for q in setQbitDecl:
myStr = 'qubit '+q+'\n'
fdecl.write(myStr)
for q in setCbitDecl:
myStr = 'cbit '+q+'\n'
fdecl.write(myStr)
fdecl.close()
parser = argparse.ArgumentParser(description='Convert QASM code into flattened QASM code')
parser.add_argument("input")
args = parser.parse_args()
process_qasm(args.input)
| bsd-2-clause | -2,705,765,999,966,634,500 | 31.322581 | 139 | 0.42228 | false | 3.208967 | false | false | false |
d2emon/newspaperizer | src/article/migrations/0011_auto_20160912_1900.py | 1 | 3451 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-12 19:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('article', '0010_auto_20160905_1127'),
]
operations = [
migrations.AlterModelOptions(
name='article',
options={'ordering': ['issue', 'page', 'title'], 'verbose_name': 'article', 'verbose_name_plural': 'articles'},
),
migrations.AlterModelOptions(
name='articlecategory',
options={'verbose_name': 'category', 'verbose_name_plural': 'categories'},
),
migrations.AlterModelOptions(
name='articletype',
options={'verbose_name': 'article type', 'verbose_name_plural': 'article_types'},
),
migrations.AlterField(
model_name='article',
name='article_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='article.ArticleType', verbose_name='type'),
),
migrations.AlterField(
model_name='article',
name='category',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='article.ArticleCategory', verbose_name='category'),
),
migrations.AlterField(
model_name='article',
name='description',
field=models.TextField(blank=True, max_length=10000, null=True, verbose_name='description'),
),
migrations.AlterField(
model_name='article',
name='issue',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='newspaper.Issue', verbose_name='issue'),
),
migrations.AlterField(
model_name='article',
name='linked',
field=models.ManyToManyField(blank=True, related_name='_article_linked_+', to='article.Article', verbose_name='linked articles'),
),
migrations.AlterField(
model_name='article',
name='page',
field=models.IntegerField(default=1, verbose_name='page'),
),
migrations.AlterField(
model_name='article',
name='title',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='title'),
),
migrations.AlterField(
model_name='articlecategory',
name='description',
field=models.TextField(max_length=10000, verbose_name='description'),
),
migrations.AlterField(
model_name='articlecategory',
name='newspaper',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='newspaper.Newspaper', verbose_name='newspaper'),
),
migrations.AlterField(
model_name='articlecategory',
name='title',
field=models.CharField(max_length=255, verbose_name='title'),
),
migrations.AlterField(
model_name='articletype',
name='description',
field=models.TextField(max_length=10000, verbose_name='description'),
),
migrations.AlterField(
model_name='articletype',
name='title',
field=models.CharField(max_length=255, verbose_name='title'),
),
]
| gpl-3.0 | 2,359,491,300,905,442,300 | 38.215909 | 147 | 0.587946 | false | 4.418694 | false | false | false |
kanafghan/fiziq-backend | src/api/default/__init__.py | 1 | 1182 | '''
Created on 08/03/2015
@author: Ismail Faizi
'''
import datetime
import endpoints
import re
defaultApi = endpoints.api(
name='default',
version='v1',
title='Fiziq Default API',
description='The API for Fiziq smarthphone applications.'
)
class Utilities(object):
"""
Utility logic for default API endpoints
"""
@classmethod
def load_entity(cls, class_name, key):
entity = class_name.get_by_urlsafe_key(key)
if not entity:
message = 'No {} with the key "{}" exists!'.format(class_name.__name__, key)
raise endpoints.NotFoundException(message)
return entity
@classmethod
def validate_email(cls, email):
m = re.match(r'^\S+@\S+\.\S+$', email)
if not m:
message = '{} is not a valid E-mail address!'.format(email)
raise endpoints.BadRequestException(message)
return email
@classmethod
def parse_date(cls, date, default=None, format="%Y-%m-%dT%H:%M:%S.%fZ"):
try:
date = datetime.datetime.strptime(date, format)
return date
except ValueError, ve:
pass
return default
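# Illustrative usage of the helpers above (values are hypothetical; kept as comments
# so that importing this module stays side-effect free):
#   Utilities.validate_email('[email protected]')        # -> '[email protected]'
#   Utilities.parse_date('2015-03-08T12:30:00.000Z')  # -> datetime(2015, 3, 8, 12, 30)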
| gpl-2.0 | 518,741,908,262,074,560 | 23.122449 | 88 | 0.598139 | false | 3.94 | false | false | false |
kobotoolbox/kobocat | onadata/apps/logger/models/instance.py | 1 | 16503 | # coding: utf-8
from datetime import date, datetime
from hashlib import sha256
import reversion
from django.contrib.auth.models import User
from django.contrib.gis.db import models
from django.contrib.gis.geos import GeometryCollection, Point
from django.db import transaction
from django.db.models import F
from django.db.models.signals import post_delete
from django.db.models.signals import post_save
from django.utils import timezone
from django.utils.encoding import smart_text
from jsonfield import JSONField
from taggit.managers import TaggableManager
from onadata.apps.logger.exceptions import FormInactiveError
from onadata.apps.logger.fields import LazyDefaultBooleanField
from onadata.apps.logger.models.survey_type import SurveyType
from onadata.apps.logger.models.xform import XForm
from onadata.apps.logger.models.submission_counter import SubmissionCounter
from onadata.apps.logger.xform_instance_parser import XFormInstanceParser, \
clean_and_parse_xml, get_uuid_from_xml
from onadata.libs.utils.common_tags import (
ATTACHMENTS,
GEOLOCATION,
ID,
MONGO_STRFTIME,
NOTES,
SUBMISSION_TIME,
TAGS,
UUID,
XFORM_ID_STRING,
SUBMITTED_BY
)
from onadata.libs.utils.model_tools import set_uuid
# need to establish id_string of the xform before we run get_dict since
# we now rely on data dictionary to parse the xml
def get_id_string_from_xml_str(xml_str):
xml_obj = clean_and_parse_xml(xml_str)
root_node = xml_obj.documentElement
id_string = root_node.getAttribute("id")
if len(id_string) == 0:
# may be hidden in submission/data/id_string
elems = root_node.getElementsByTagName('data')
for data in elems:
for child in data.childNodes:
id_string = data.childNodes[0].getAttribute('id')
if len(id_string) > 0:
break
if len(id_string) > 0:
break
return id_string
def submission_time():
return timezone.now()
def update_xform_submission_count(sender, instance, created, **kwargs):
if not created:
return
# `defer_counting` is a Python-only attribute
if getattr(instance, 'defer_counting', False):
return
with transaction.atomic():
xform = XForm.objects.only('user_id').get(pk=instance.xform_id)
# Update with `F` expression instead of `select_for_update` to avoid
# locks, which were mysteriously piling up during periods of high
# traffic
XForm.objects.filter(pk=instance.xform_id).update(
num_of_submissions=F('num_of_submissions') + 1,
last_submission_time=instance.date_created,
)
# Hack to avoid circular imports
UserProfile = User.profile.related.related_model
profile, created = UserProfile.objects.only('pk').get_or_create(
user_id=xform.user_id
)
UserProfile.objects.filter(pk=profile.pk).update(
num_of_submissions=F('num_of_submissions') + 1,
)
def nullify_exports_time_of_last_submission(sender, instance, **kwargs):
"""
Formerly, "deleting" a submission would set a flag on the `Instance`,
causing the `date_modified` attribute to be set to the current timestamp.
`Export.exports_outdated()` relied on this to detect when a new `Export`
needed to be generated due to submission deletion, but now that we always
delete `Instance`s outright, this trick doesn't work. This kludge simply
makes every `Export` for a form appear stale by nulling out its
`time_of_last_submission` attribute.
"""
# Avoid circular import
try:
export_model = instance.xform.export_set.model
except XForm.DoesNotExist:
return
f = instance.xform.export_set.filter(
# Match the statuses considered by `Export.exports_outdated()`
internal_status__in=[export_model.SUCCESSFUL, export_model.PENDING],
)
f.update(time_of_last_submission=None)
def update_user_submissions_counter(sender, instance, created, **kwargs):
if not created:
return
if getattr(instance, 'defer_counting', False):
return
# Querying the database this way because it's faster than querying
# the instance model for the data
user_id = XForm.objects.values_list('user_id', flat=True).get(
pk=instance.xform_id
)
today = date.today()
first_day_of_month = today.replace(day=1)
queryset = SubmissionCounter.objects.filter(
user_id=user_id, timestamp=first_day_of_month
)
if not queryset.exists():
SubmissionCounter.objects.create(user_id=user_id)
queryset.update(count=F('count') + 1)
def update_xform_submission_count_delete(sender, instance, **kwargs):
try:
xform = XForm.objects.select_for_update().get(pk=instance.xform.pk)
except XForm.DoesNotExist:
pass
else:
xform.num_of_submissions -= 1
if xform.num_of_submissions < 0:
xform.num_of_submissions = 0
# Update `date_modified` to detect outdated exports
# with deleted instances
xform.save(update_fields=['num_of_submissions', 'date_modified'])
profile_qs = User.profile.get_queryset()
try:
profile = profile_qs.select_for_update()\
.get(pk=xform.user.profile.pk)
except profile_qs.model.DoesNotExist:
pass
else:
profile.num_of_submissions -= 1
if profile.num_of_submissions < 0:
profile.num_of_submissions = 0
profile.save(update_fields=['num_of_submissions'])
@reversion.register
class Instance(models.Model):
XML_HASH_LENGTH = 64
DEFAULT_XML_HASH = None
json = JSONField(default={}, null=False)
xml = models.TextField()
xml_hash = models.CharField(max_length=XML_HASH_LENGTH, db_index=True, null=True,
default=DEFAULT_XML_HASH)
user = models.ForeignKey(User, related_name='instances', null=True, on_delete=models.CASCADE)
xform = models.ForeignKey(XForm, null=True, related_name='instances', on_delete=models.CASCADE)
survey_type = models.ForeignKey(SurveyType, on_delete=models.CASCADE)
# shows when we first received this instance
date_created = models.DateTimeField(auto_now_add=True)
# this will end up representing "date last parsed"
date_modified = models.DateTimeField(auto_now=True)
# this formerly represented "date instance was deleted".
# do not use it anymore.
deleted_at = models.DateTimeField(null=True, default=None)
# ODK keeps track of three statuses for an instance:
# incomplete, submitted, complete
# we add a fourth status: submitted_via_web
status = models.CharField(max_length=20,
default='submitted_via_web')
uuid = models.CharField(max_length=249, default='', db_index=True)
# store an geographic objects associated with this instance
geom = models.GeometryCollectionField(null=True)
tags = TaggableManager()
validation_status = JSONField(null=True, default=None)
# TODO Don't forget to update all records with command `update_is_sync_with_mongo`.
is_synced_with_mongo = LazyDefaultBooleanField(default=False)
    # If `XForm.has_kpi_hooks` is True, this field should eventually be True as well.
# It tells whether the instance has been successfully sent to KPI.
posted_to_kpi = LazyDefaultBooleanField(default=False)
class Meta:
app_label = 'logger'
@property
def asset(self):
"""
The goal of this property is to make the code future proof.
We can run the tests on kpi backend or kobocat backend.
Instance.asset will exist for both
It's used for validation_statuses.
:return: XForm
"""
return self.xform
def _check_active(self, force):
"""Check that form is active and raise exception if not.
:param force: Ignore restrictions on saving.
"""
if not force and self.xform and not self.xform.downloadable:
raise FormInactiveError()
def _set_geom(self):
xform = self.xform
data_dictionary = xform.data_dictionary()
geo_xpaths = data_dictionary.geopoint_xpaths()
doc = self.get_dict()
points = []
if len(geo_xpaths):
for xpath in geo_xpaths:
geometry = [float(s) for s in doc.get(xpath, '').split()]
if len(geometry):
lat, lng = geometry[0:2]
points.append(Point(lng, lat))
if not xform.instances_with_geopoints and len(points):
xform.instances_with_geopoints = True
xform.save()
self.geom = GeometryCollection(points)
def _set_json(self):
doc = self.get_dict()
if not self.date_created:
now = submission_time()
self.date_created = now
point = self.point
if point:
doc[GEOLOCATION] = [point.y, point.x]
doc[SUBMISSION_TIME] = self.date_created.strftime(MONGO_STRFTIME)
doc[XFORM_ID_STRING] = self._parser.get_xform_id_string()
doc[SUBMITTED_BY] = self.user.username\
if self.user is not None else None
self.json = doc
def _set_parser(self):
if not hasattr(self, "_parser"):
self._parser = XFormInstanceParser(
self.xml, self.xform.data_dictionary())
def _set_survey_type(self):
self.survey_type, created = \
SurveyType.objects.get_or_create(slug=self.get_root_node_name())
def _set_uuid(self):
if self.xml and not self.uuid:
uuid = get_uuid_from_xml(self.xml)
if uuid is not None:
self.uuid = uuid
set_uuid(self)
def _populate_xml_hash(self):
"""
Populate the `xml_hash` attribute of this `Instance` based on the content of the `xml`
attribute.
"""
self.xml_hash = self.get_hash(self.xml)
@classmethod
def populate_xml_hashes_for_instances(cls, usernames=None, pk__in=None, repopulate=False):
"""
Populate the `xml_hash` field for `Instance` instances limited to the specified users
and/or DB primary keys.
:param list[str] usernames: Optional list of usernames for whom `Instance`s will be
populated with hashes.
:param list[int] pk__in: Optional list of primary keys for `Instance`s that should be
populated with hashes.
:param bool repopulate: Optional argument to force repopulation of existing hashes.
:returns: Total number of `Instance`s updated.
:rtype: int
"""
filter_kwargs = dict()
if usernames:
filter_kwargs['xform__user__username__in'] = usernames
if pk__in:
filter_kwargs['pk__in'] = pk__in
# By default, skip over instances previously populated with hashes.
if not repopulate:
filter_kwargs['xml_hash'] = cls.DEFAULT_XML_HASH
# Query for the target `Instance`s.
target_instances_queryset = cls.objects.filter(**filter_kwargs)
# Exit quickly if there's nothing to do.
if not target_instances_queryset.exists():
return 0
# Limit our queryset result content since we'll only need the `pk` and `xml` attributes.
target_instances_queryset = target_instances_queryset.only('pk', 'xml')
instances_updated_total = 0
# Break the potentially large `target_instances_queryset` into chunks to avoid memory
# exhaustion.
chunk_size = 2000
target_instances_queryset = target_instances_queryset.order_by('pk')
target_instances_qs_chunk = target_instances_queryset
while target_instances_qs_chunk.exists():
# Take a chunk of the target `Instance`s.
target_instances_qs_chunk = target_instances_qs_chunk[0:chunk_size]
for instance in target_instances_qs_chunk:
pk = instance.pk
xml = instance.xml
# Do a `Queryset.update()` on this individual instance to avoid signals triggering
# things like `Reversion` versioning.
instances_updated_count = Instance.objects.filter(pk=pk).update(
xml_hash=cls.get_hash(xml))
instances_updated_total += instances_updated_count
# Set up the next chunk
target_instances_qs_chunk = target_instances_queryset.filter(
pk__gt=instance.pk)
return instances_updated_total
def get(self, abbreviated_xpath):
self._set_parser()
return self._parser.get(abbreviated_xpath)
def get_dict(self, force_new=False, flat=True):
"""Return a python object representation of this instance's XML."""
self._set_parser()
return self._parser.get_flat_dict_with_attributes() if flat else\
self._parser.to_dict()
def get_full_dict(self):
# TODO should we store all of these in the JSON no matter what?
d = self.json
data = {
UUID: self.uuid,
ID: self.id,
self.USERFORM_ID: '%s_%s' % (
self.user.username,
self.xform.id_string),
ATTACHMENTS: [a.media_file.name for a in
self.attachments.all()],
self.STATUS: self.status,
TAGS: list(self.tags.names()),
NOTES: self.get_notes()
}
d.update(data)
return d
def get_notes(self):
return [note['note'] for note in self.notes.values('note')]
def get_root_node(self):
self._set_parser()
return self._parser.get_root_node()
def get_root_node_name(self):
self._set_parser()
return self._parser.get_root_node_name()
@staticmethod
def get_hash(input_string):
"""
Compute the SHA256 hash of the given string. A wrapper to standardize hash computation.
:param string_types input_string: The string to be hashed.
:return: The resulting hash.
:rtype: str
"""
input_string = smart_text(input_string)
return sha256(input_string.encode()).hexdigest()
@property
def point(self):
gc = self.geom
if gc and len(gc):
return gc[0]
def save(self, *args, **kwargs):
force = kwargs.pop("force", False)
self._check_active(force)
self._set_geom()
self._set_json()
self._set_survey_type()
self._set_uuid()
self._populate_xml_hash()
# Force validation_status to be dict
if self.validation_status is None:
self.validation_status = {}
super().save(*args, **kwargs)
def get_validation_status(self):
"""
Returns instance validation status.
:return: object
"""
# This method can be tweaked to implement default validation status
# For example:
# if not self.validation_status:
# self.validation_status = self.asset.settings.get("validation_statuses")[0]
return self.validation_status
post_save.connect(update_xform_submission_count, sender=Instance,
dispatch_uid='update_xform_submission_count')
post_delete.connect(nullify_exports_time_of_last_submission, sender=Instance,
dispatch_uid='nullify_exports_time_of_last_submission')
post_save.connect(update_user_submissions_counter, sender=Instance,
dispatch_uid='update_user_submissions_counter')
post_delete.connect(update_xform_submission_count_delete, sender=Instance,
dispatch_uid='update_xform_submission_count_delete')
if Instance.XML_HASH_LENGTH / 2 != sha256().digest_size:
raise AssertionError('SHA256 hash `digest_size` expected to be `{}`, not `{}`'.format(
Instance.XML_HASH_LENGTH, sha256().digest_size))
class InstanceHistory(models.Model):
class Meta:
app_label = 'logger'
xform_instance = models.ForeignKey(
Instance, related_name='submission_history', on_delete=models.CASCADE)
xml = models.TextField()
# old instance id
uuid = models.CharField(max_length=249, default='')
date_created = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
| bsd-2-clause | -6,849,813,254,606,204,000 | 34.33833 | 99 | 0.636612 | false | 3.955657 | false | false | false |
Agiroq/VW | gas.py | 1 | 1152 | import machine, time
from machine import Timer
GASANALOG = 0
ALARMLED = 13
gasA = machine.ADC(GASANALOG)
gasLED = machine.Pin(ALARMLED, machine.Pin.OUT)
class CheckGas():
"""docstring for checkGas."""
def __init__(self, led, sensor, time=5000, level=480):
super(CheckGas, self).__init__()
self.led = led
self.led.high()
time.sleep_ms(500)
self.led.low()
time.sleep_ms(500)
self.led.high()
time.sleep_ms(500)
self.led.low()
self.gas = sensor
self.timer = Timer(-1)
self.level = level
self.time = time
self.start(self.time)
def checkGas(self):
value = self.gas.read()
check = 0
        if value > self.level:  # reuse the reading taken above instead of sampling the sensor twice
self.led.high()
check = 1
else:
self.led.low()
check = 0
print(value, check)
return check
def start(self, time):
self.timer.init(period=time, mode=Timer.PERIODIC,
callback=lambda t:self.checkGas())
def stop(self):
self.timer.deinit()
g = CheckGas(gasLED, gasA, 5000)
| cc0-1.0 | -591,044,774,667,530,100 | 23 | 58 | 0.545139 | false | 3.418398 | false | false | false |
GNOME/pygoocanvas | demo/simple_demo/clipping_demo.py | 1 | 5063 | import goocanvas
import gtk
import cairo
def on_button_press (item, target, event, id):
print "%s received 'button-press' signal at %f, %f (root: %f, %f)" % \
(id, event.x, event.y, event.x_root, event.y_root)
return True
def setup_canvas (canvas):
root = canvas.get_root_item ()
#Plain items without clip path.
item = goocanvas.Ellipse (parent = root,
center_x = 0,
center_y = 0,
radius_x = 50,
radius_y = 30,
fill_color = "blue")
item.translate (100, 100)
item.rotate (30, 0, 0)
item.connect ("button_press_event",
on_button_press, "Blue ellipse (unclipped)")
item = goocanvas.Rect (parent = root,
x = 200,
y = 50,
width = 100,
height = 100,
fill_color = "red",
clip_fill_rule = cairo.FILL_RULE_EVEN_ODD)
item.connect ("button_press_event",
on_button_press, "Red rectangle (unclipped)")
item = goocanvas.Rect (parent = root,
x = 380,
y = 50,
width = 100,
height = 100,
fill_color = "yellow")
item.connect ("button_press_event",
on_button_press, "Yellow rectangle (unclipped)")
# Clipped items.
item = goocanvas.Ellipse (parent = root,
center_x = 0,
center_y = 0,
radius_x = 50,
radius_y = 30,
fill_color = "blue",
clip_path = "M 0 0 h 100 v 100 h -100 Z")
item.translate (100, 300)
item.rotate (30, 0, 0)
item.connect ("button_press_event", on_button_press, "Blue ellipse")
item = goocanvas.Rect (parent = root,
x = 200,
y = 250,
width = 100,
height = 100,
fill_color = "red",
clip_path = "M 250 300 h 100 v 100 h -100 Z",
clip_fill_rule = cairo.FILL_RULE_EVEN_ODD)
item.connect ("button_press_event", on_button_press, "Red rectangle")
item = goocanvas.Rect (parent = root,
x = 380,
y = 250,
width = 100,
height = 100,
fill_color = "yellow",
clip_path = "M480,230 l40,100 l-80 0 z")
item.connect ("button_press_event", on_button_press, "Yellow rectangle")
# Table with clipped items.
table = goocanvas.Table (parent = root)
table.translate (200, 400)
table.rotate (30, 0, 0)
item = goocanvas.Ellipse (parent = table,
center_x = 0,
center_y = 0,
radius_x = 50,
radius_y = 30,
fill_color = "blue",
clip_path = "M 0 0 h 100 v 100 h -100 Z")
item.translate (100, 300)
item.rotate (30, 0, 0)
item.connect ("button_press_event", on_button_press, "Blue ellipse")
item = goocanvas.Rect (parent = table,
x = 200,
y = 250,
width = 100,
height = 100,
fill_color = "red",
clip_path = "M 250 300 h 100 v 100 h -100 Z",
clip_fill_rule = cairo.FILL_RULE_EVEN_ODD)
item.connect ("button_press_event", on_button_press, "Red rectangle")
table.set_child_properties (item, column = 1)
item = goocanvas.Rect (parent = table,
x = 380,
y = 250,
width = 100,
height = 100,
fill_color = "yellow",
clip_path = "M480,230 l40,100 l-80 0 z")
item.connect ("button_press_event", on_button_press, "Yellow rectangle")
table.set_child_properties (item, column = 2)
def create_clipping_page ():
vbox = gtk.VBox (False, 4)
vbox.set_border_width (4)
scrolled_win = gtk.ScrolledWindow ()
scrolled_win.set_shadow_type (gtk.SHADOW_IN)
vbox.add (scrolled_win)
canvas = goocanvas.Canvas ()
canvas.set_size_request (600, 450)
canvas.set_bounds (0, 0, 1000, 1000)
scrolled_win.add (canvas)
setup_canvas (canvas)
return vbox
def main ():
vb = create_clipping_page ()
win = gtk.Window()
win.connect("destroy", gtk.main_quit)
win.add(vb)
win.show_all()
gtk.main()
if __name__ == "__main__":
main ()
| lgpl-2.1 | 4,378,497,605,375,581,700 | 34.907801 | 76 | 0.439265 | false | 4.063403 | false | false | false |
magfest/ubersystem | tests/locust/locustfile.py | 1 | 2821 | """
Load tests using locust.io.
"""
import urllib3
import faker
from locust import HttpLocust, TaskSet, task
urllib3.disable_warnings()
fake = faker.Faker()
faker.providers.phone_number.en_US.Provider.formats = ('888-555-####',)
class AttendeeBehavior(TaskSet):
min_wait = 1000
max_wait = 10000
def on_start(self):
self.verify = False
def get_static_assets(self):
self.client.get('/static/deps/combined.min.css', verify=self.verify)
self.client.get('/static_views/styles/main.css', verify=self.verify)
self.client.get('/static/theme/prereg.css', verify=self.verify)
self.client.get('/static/theme/prereg_extra.css', verify=self.verify)
self.client.get('/static/deps/combined.min.js', verify=self.verify)
self.client.get('/static/js/common-static.js', verify=self.verify)
self.client.get('/static/theme/tile-background.png', verify=self.verify)
self.client.get('/static/images/loading.gif', verify=self.verify)
self.client.get('/static/theme/banner_2x.png', verify=self.verify)
@task
def preregister(self):
response = self.client.get('/preregistration/form', verify=self.verify)
if response.status_code != 200:
return
self.get_static_assets()
response = self.client.post(
'/preregistration/post_form',
verify=self.verify,
data={
'badge_type': '51352218',
'name': '',
'badges': '1',
'first_name': fake.first_name(),
'last_name': fake.last_name(),
'same_legal_name': "Yep, that's right",
'legal_name': '',
'amount_extra': '0',
'badge_printed_name': '',
'affiliate': '',
'shirt': '0',
'birthdate': fake.date_time_between('-80y', '-14y').strftime('%Y-%m-%d'),
'email': fake.safe_email(),
'zip_code': fake.zipcode(),
'ec_name': fake.name(),
'ec_phone': fake.phone_number(),
'cellphone': fake.phone_number(),
'found_how': fake.catch_phrase(),
'comments': fake.paragraph(),
'extra_donation': '',
'pii_consent': '1',
}
)
if response.status_code != 200:
return
response = self.client.get('/preregistration/process_free_prereg', verify=self.verify)
if response.status_code != 200:
return
response = self.client.get('/preregistration/paid_preregistrations?payment_received=0', verify=self.verify)
if response.status_code != 200:
return
class AttendeeLocust(HttpLocust):
task_set = AttendeeBehavior
| agpl-3.0 | 7,510,887,857,052,802,000 | 33.402439 | 115 | 0.562212 | false | 3.746348 | false | false | false |
christofdamian/gpodder | src/gpodder/model.py | 1 | 44899 | # -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2010 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# gpodder.model - Core model classes for gPodder (2009-08-13)
# Based on libpodcasts.py (thp, 2005-10-29)
#
import gpodder
from gpodder import util
from gpodder import feedcore
from gpodder import youtube
from gpodder import corestats
from gpodder.liblogger import log
import os
import re
import glob
import shutil
import time
import datetime
import rfc822
import hashlib
import feedparser
import xml.sax.saxutils
_ = gpodder.gettext
class CustomFeed(feedcore.ExceptionWithData): pass
class gPodderFetcher(feedcore.Fetcher):
"""
This class extends the feedcore Fetcher with the gPodder User-Agent and the
Proxy handler based on the current settings in gPodder and provides a
convenience method (fetch_channel) for use by PodcastChannel objects.
"""
custom_handlers = []
def __init__(self):
feedcore.Fetcher.__init__(self, gpodder.user_agent)
def fetch_channel(self, channel):
etag = channel.etag
modified = feedparser._parse_date(channel.last_modified)
# If we have a username or password, rebuild the url with them included
        # Note: using a HTTPBasicAuthHandler would be a pain because we need to
# know the realm. It can be done, but I think this method works, too
url = channel.authenticate_url(channel.url)
for handler in self.custom_handlers:
custom_feed = handler.handle_url(url)
if custom_feed is not None:
raise CustomFeed(custom_feed)
self.fetch(url, etag, modified)
def _resolve_url(self, url):
return youtube.get_real_channel_url(url)
@classmethod
def register(cls, handler):
cls.custom_handlers.append(handler)
# def _get_handlers(self):
# # Add a ProxyHandler for fetching data via a proxy server
# proxies = {'http': 'http://proxy.example.org:8080'}
# return[urllib2.ProxyHandler(proxies))]
# The "register" method is exposed here for external usage
register_custom_handler = gPodderFetcher.register
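# Sketch of the custom-handler hook exposed above (not part of gPodder itself; all
# names are hypothetical). A handler only needs a handle_url(url) method returning a
# feed-like object (the interface consumed by _consume_custom_feed) for URLs it wants
# to take over, or None to fall through to the normal feedparser-based fetching:
#
#     class ExampleHandler(object):
#         def handle_url(self, url):
#             if url.startswith('example://'):
#                 return make_example_feed(url)   # hypothetical helper
#             return None
#
#     register_custom_handler(ExampleHandler())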
class PodcastModelObject(object):
"""
A generic base class for our podcast model providing common helper
and utility functions.
"""
@classmethod
def create_from_dict(cls, d, *args):
"""
Create a new object, passing "args" to the constructor
and then updating the object with the values from "d".
"""
o = cls(*args)
o.update_from_dict(d)
return o
def update_from_dict(self, d):
"""
Updates the attributes of this object with values from the
dictionary "d" by using the keys found in "d".
"""
for k in d:
if hasattr(self, k):
setattr(self, k, d[k])
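# Example of the factory above (sketch): given a row dict "d" loaded from the database,
# PodcastEpisode.create_from_dict(d, channel) instantiates an episode bound to "channel"
# and copies every key of "d" that matches an existing attribute onto the new object.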
class PodcastChannel(PodcastModelObject):
"""holds data for a complete channel"""
MAX_FOLDERNAME_LENGTH = 150
SECONDS_PER_WEEK = 7*24*60*60
feed_fetcher = gPodderFetcher()
@classmethod
def build_factory(cls, download_dir):
def factory(dict, db):
return cls.create_from_dict(dict, db, download_dir)
return factory
@classmethod
def load_from_db(cls, db, download_dir):
return db.load_channels(factory=cls.build_factory(download_dir))
@classmethod
def load(cls, db, url, create=True, authentication_tokens=None,\
max_episodes=0, download_dir=None, allow_empty_feeds=False):
if isinstance(url, unicode):
url = url.encode('utf-8')
tmp = db.load_channels(factory=cls.build_factory(download_dir), url=url)
if len(tmp):
return tmp[0]
elif create:
tmp = PodcastChannel(db, download_dir)
tmp.url = url
if authentication_tokens is not None:
tmp.username = authentication_tokens[0]
tmp.password = authentication_tokens[1]
tmp.update(max_episodes)
tmp.save()
db.force_last_new(tmp)
# Subscribing to empty feeds should yield an error (except if
# the user specifically allows empty feeds in the config UI)
if sum(tmp.get_statistics()) == 0 and not allow_empty_feeds:
tmp.delete()
raise Exception(_('No downloadable episodes in feed'))
return tmp
def episode_factory(self, d, db__parameter_is_unused=None):
"""
This function takes a dictionary containing key-value pairs for
episodes and returns a new PodcastEpisode object that is connected
to this PodcastChannel object.
Returns: A new PodcastEpisode object
"""
return PodcastEpisode.create_from_dict(d, self)
def _consume_custom_feed(self, custom_feed, max_episodes=0):
self.title = custom_feed.get_title()
self.link = custom_feed.get_link()
self.description = custom_feed.get_description()
self.image = custom_feed.get_image()
self.pubDate = time.time()
self.save()
guids = [episode.guid for episode in self.get_all_episodes()]
# Insert newly-found episodes into the database
custom_feed.get_new_episodes(self, guids)
self.save()
self.db.purge(max_episodes, self.id)
def _consume_updated_feed(self, feed, max_episodes=0):
self.parse_error = feed.get('bozo_exception', None)
self.title = feed.feed.get('title', self.url)
self.link = feed.feed.get('link', self.link)
self.description = feed.feed.get('subtitle', self.description)
# Start YouTube-specific title FIX
YOUTUBE_PREFIX = 'Uploads by '
if self.title.startswith(YOUTUBE_PREFIX):
self.title = self.title[len(YOUTUBE_PREFIX):] + ' on YouTube'
# End YouTube-specific title FIX
try:
            self.pubDate = rfc822.mktime_tz(feed.feed.get('updated_parsed', None) + (0,))
except:
self.pubDate = time.time()
if hasattr(feed.feed, 'image'):
for attribute in ('href', 'url'):
new_value = getattr(feed.feed.image, attribute, None)
if new_value is not None:
log('Found cover art in %s: %s', attribute, new_value)
self.image = new_value
if hasattr(feed.feed, 'icon'):
self.image = feed.feed.icon
self.save()
# Load all episodes to update them properly.
existing = self.get_all_episodes()
# We can limit the maximum number of entries that gPodder will parse
if max_episodes > 0 and len(feed.entries) > max_episodes:
entries = feed.entries[:max_episodes]
else:
entries = feed.entries
# Title + PubDate hashes for existing episodes
existing_dupes = dict((e.duplicate_id(), e) for e in existing)
# GUID-based existing episode list
existing_guids = dict((e.guid, e) for e in existing)
# Get most recent pubDate of all episodes
last_pubdate = self.db.get_last_pubdate(self) or 0
# Search all entries for new episodes
for entry in entries:
try:
episode = PodcastEpisode.from_feedparser_entry(entry, self)
if episode is not None and not episode.title:
episode.title, ext = os.path.splitext(os.path.basename(episode.url))
except Exception, e:
log('Cannot instantiate episode: %s. Skipping.', e, sender=self, traceback=True)
continue
if episode is None:
continue
# Detect (and update) existing episode based on GUIDs
existing_episode = existing_guids.get(episode.guid, None)
if existing_episode:
existing_episode.update_from(episode)
existing_episode.save()
continue
# Detect (and update) existing episode based on duplicate ID
existing_episode = existing_dupes.get(episode.duplicate_id(), None)
if existing_episode:
if existing_episode.is_duplicate(episode):
existing_episode.update_from(episode)
existing_episode.save()
continue
# Workaround for bug 340: If the episode has been
# published earlier than one week before the most
# recent existing episode, do not mark it as new.
if episode.pubDate < last_pubdate - self.SECONDS_PER_WEEK:
log('Episode with old date: %s', episode.title, sender=self)
episode.is_played = True
episode.save()
# Remove "unreachable" episodes - episodes that have not been
# downloaded and that the feed does not list as downloadable anymore
if self.id is not None:
seen_guids = set(e.guid for e in feed.entries if hasattr(e, 'guid'))
episodes_to_purge = (e for e in existing if \
e.state != gpodder.STATE_DOWNLOADED and \
e.guid not in seen_guids and e.guid is not None)
for episode in episodes_to_purge:
log('Episode removed from feed: %s (%s)', episode.title, \
episode.guid, sender=self)
self.db.delete_episode_by_guid(episode.guid, self.id)
# This *might* cause episodes to be skipped if there were more than
# max_episodes_per_feed items added to the feed between updates.
        # The benefit is that it prevents old episodes from appearing as new
# in certain situations (see bug #340).
self.db.purge(max_episodes, self.id)
def update_channel_lock(self):
self.db.update_channel_lock(self)
def _update_etag_modified(self, feed):
self.updated_timestamp = time.time()
self.calculate_publish_behaviour()
self.etag = feed.headers.get('etag', self.etag)
self.last_modified = feed.headers.get('last-modified', self.last_modified)
def query_automatic_update(self):
"""Query if this channel should be updated automatically
Returns True if the update should happen in automatic
mode or False if this channel should be skipped (timeout
not yet reached or release not expected right now).
"""
updated = self.updated_timestamp
expected = self.release_expected
now = time.time()
one_day_ago = now - 60*60*24
lastcheck = now - 60*10
return updated < one_day_ago or \
(expected < now and updated < lastcheck)
def update(self, max_episodes=0):
try:
self.feed_fetcher.fetch_channel(self)
except CustomFeed, updated:
custom_feed = updated.data
self._consume_custom_feed(custom_feed, max_episodes)
self.save()
except feedcore.UpdatedFeed, updated:
feed = updated.data
self._consume_updated_feed(feed, max_episodes)
self._update_etag_modified(feed)
self.save()
except feedcore.NewLocation, updated:
feed = updated.data
self.url = feed.href
self._consume_updated_feed(feed, max_episodes)
self._update_etag_modified(feed)
self.save()
except feedcore.NotModified, updated:
feed = updated.data
self._update_etag_modified(feed)
self.save()
except Exception, e:
# "Not really" errors
#feedcore.AuthenticationRequired
# Temporary errors
#feedcore.Offline
#feedcore.BadRequest
#feedcore.InternalServerError
#feedcore.WifiLogin
# Permanent errors
#feedcore.Unsubscribe
#feedcore.NotFound
#feedcore.InvalidFeed
#feedcore.UnknownStatusCode
raise
self.db.commit()
def delete(self):
self.db.delete_channel(self)
def save(self):
self.db.save_channel(self)
def get_statistics(self):
if self.id is None:
return (0, 0, 0, 0, 0)
else:
return self.db.get_channel_count(int(self.id))
def authenticate_url(self, url):
return util.url_add_authentication(url, self.username, self.password)
def __init__(self, db, download_dir):
self.db = db
self.download_dir = download_dir
self.id = None
self.url = None
self.title = ''
self.link = ''
self.description = ''
self.image = None
self.pubDate = 0
self.parse_error = None
self.foldername = None
self.auto_foldername = 1 # automatically generated foldername
# should this channel be synced to devices? (ex: iPod)
self.sync_to_devices = True
# to which playlist should be synced
self.device_playlist_name = 'gPodder'
# if set, this overrides the channel-provided title
self.override_title = ''
self.username = ''
self.password = ''
self.last_modified = None
self.etag = None
self.save_dir_size = 0
self.__save_dir_size_set = False
self.channel_is_locked = False
self.release_expected = time.time()
self.release_deviation = 0
self.updated_timestamp = 0
def calculate_publish_behaviour(self):
episodes = self.db.load_episodes(self, factory=self.episode_factory, limit=30)
if len(episodes) < 3:
return
deltas = []
latest = max(e.pubDate for e in episodes)
for index in range(len(episodes)-1):
if episodes[index].pubDate != 0 and episodes[index+1].pubDate != 0:
deltas.append(episodes[index].pubDate - episodes[index+1].pubDate)
if len(deltas) > 1:
stats = corestats.Stats(deltas)
self.release_expected = min([latest+stats.stdev(), latest+(stats.min()+stats.avg())*.5])
self.release_deviation = stats.stdev()
else:
self.release_expected = latest
self.release_deviation = 0
def request_save_dir_size(self):
if not self.__save_dir_size_set:
self.update_save_dir_size()
self.__save_dir_size_set = True
def update_save_dir_size(self):
self.save_dir_size = util.calculate_size(self.save_dir)
def get_title( self):
if self.override_title:
return self.override_title
elif not self.__title.strip():
return self.url
else:
return self.__title
def set_title( self, value):
self.__title = value.strip()
title = property(fget=get_title,
fset=set_title)
def set_custom_title( self, custom_title):
custom_title = custom_title.strip()
# if the custom title is the same as we have
if custom_title == self.override_title:
return
# if custom title is the same as channel title and we didn't have a custom title
if custom_title == self.__title and self.override_title == '':
return
# make sure self.foldername is initialized
self.get_save_dir()
# rename folder if custom_title looks sane
new_folder_name = self.find_unique_folder_name(custom_title)
if len(new_folder_name) > 0 and new_folder_name != self.foldername:
log('Changing foldername based on custom title: %s', custom_title, sender=self)
new_folder = os.path.join(self.download_dir, new_folder_name)
old_folder = os.path.join(self.download_dir, self.foldername)
if os.path.exists(old_folder):
if not os.path.exists(new_folder):
# Old folder exists, new folder does not -> simply rename
log('Renaming %s => %s', old_folder, new_folder, sender=self)
os.rename(old_folder, new_folder)
else:
# Both folders exist -> move files and delete old folder
log('Moving files from %s to %s', old_folder, new_folder, sender=self)
for file in glob.glob(os.path.join(old_folder, '*')):
shutil.move(file, new_folder)
log('Removing %s', old_folder, sender=self)
shutil.rmtree(old_folder, ignore_errors=True)
self.foldername = new_folder_name
self.save()
if custom_title != self.__title:
self.override_title = custom_title
else:
self.override_title = ''
def get_downloaded_episodes(self):
return self.db.load_episodes(self, factory=self.episode_factory, state=gpodder.STATE_DOWNLOADED)
def get_new_episodes(self, downloading=lambda e: False):
"""
Get a list of new episodes. You can optionally specify
"downloading" as a callback that takes an episode as
a parameter and returns True if the episode is currently
being downloaded or False if not.
By default, "downloading" is implemented so that it
reports all episodes as not downloading.
"""
return [episode for episode in self.db.load_episodes(self, \
factory=self.episode_factory, state=gpodder.STATE_NORMAL) if \
episode.check_is_new(downloading=downloading)]
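    # Sketch of the "downloading" callback described above (names are hypothetical):
    #
    #     active_urls = set(task.url for task in download_queue)
    #     new_episodes = channel.get_new_episodes(
    #             downloading=lambda episode: episode.url in active_urls)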
def get_playlist_filename(self):
        # If the save_dir doesn't end with a slash (which it really should
        # not, if the implementation is correct), we can just append .m3u :)
assert self.save_dir[-1] != '/'
return self.save_dir+'.m3u'
def update_m3u_playlist(self):
m3u_filename = self.get_playlist_filename()
downloaded_episodes = self.get_downloaded_episodes()
if not downloaded_episodes:
log('No episodes - removing %s', m3u_filename, sender=self)
util.delete_file(m3u_filename)
return
log('Writing playlist to %s', m3u_filename, sender=self)
f = open(m3u_filename, 'w')
f.write('#EXTM3U\n')
for episode in PodcastEpisode.sort_by_pubdate(downloaded_episodes):
if episode.was_downloaded(and_exists=True):
filename = episode.local_filename(create=False)
assert filename is not None
if os.path.dirname(filename).startswith(os.path.dirname(m3u_filename)):
filename = filename[len(os.path.dirname(m3u_filename)+os.sep):]
f.write('#EXTINF:0,'+self.title+' - '+episode.title+' ('+episode.cute_pubdate()+')\n')
f.write(filename+'\n')
f.close()
def get_all_episodes(self):
return self.db.load_episodes(self, factory=self.episode_factory)
def find_unique_folder_name(self, foldername):
# Remove trailing dots to avoid errors on Windows (bug 600)
foldername = foldername.strip().rstrip('.')
current_try = util.sanitize_filename(foldername, \
self.MAX_FOLDERNAME_LENGTH)
next_try_id = 2
while True:
if self.db.channel_foldername_exists(current_try):
current_try = '%s (%d)' % (foldername, next_try_id)
next_try_id += 1
else:
return current_try
def get_save_dir(self):
urldigest = hashlib.md5(self.url).hexdigest()
sanitizedurl = util.sanitize_filename(self.url, self.MAX_FOLDERNAME_LENGTH)
if self.foldername is None or (self.auto_foldername and (self.foldername == urldigest or self.foldername.startswith(sanitizedurl))):
# we must change the folder name, because it has not been set manually
fn_template = util.sanitize_filename(self.title, self.MAX_FOLDERNAME_LENGTH)
# if this is an empty string, try the basename
if len(fn_template) == 0:
log('That is one ugly feed you have here! (Report this to bugs.gpodder.org: %s)', self.url, sender=self)
fn_template = util.sanitize_filename(os.path.basename(self.url), self.MAX_FOLDERNAME_LENGTH)
# If the basename is also empty, use the first 6 md5 hexdigest chars of the URL
if len(fn_template) == 0:
log('That is one REALLY ugly feed you have here! (Report this to bugs.gpodder.org: %s)', self.url, sender=self)
fn_template = urldigest # no need for sanitize_filename here
# Find a unique folder name for this podcast
wanted_foldername = self.find_unique_folder_name(fn_template)
# if the foldername has not been set, check if the (old) md5 filename exists
if self.foldername is None and os.path.exists(os.path.join(self.download_dir, urldigest)):
log('Found pre-0.15.0 download folder for %s: %s', self.title, urldigest, sender=self)
self.foldername = urldigest
# we have a valid, new folder name in "current_try" -> use that!
if self.foldername is not None and wanted_foldername != self.foldername:
# there might be an old download folder crawling around - move it!
new_folder_name = os.path.join(self.download_dir, wanted_foldername)
old_folder_name = os.path.join(self.download_dir, self.foldername)
if os.path.exists(old_folder_name):
if not os.path.exists(new_folder_name):
# Old folder exists, new folder does not -> simply rename
log('Renaming %s => %s', old_folder_name, new_folder_name, sender=self)
os.rename(old_folder_name, new_folder_name)
else:
# Both folders exist -> move files and delete old folder
log('Moving files from %s to %s', old_folder_name, new_folder_name, sender=self)
for file in glob.glob(os.path.join(old_folder_name, '*')):
shutil.move(file, new_folder_name)
log('Removing %s', old_folder_name, sender=self)
shutil.rmtree(old_folder_name, ignore_errors=True)
log('Updating foldername of %s to "%s".', self.url, wanted_foldername, sender=self)
self.foldername = wanted_foldername
self.save()
save_dir = os.path.join(self.download_dir, self.foldername)
# Create save_dir if it does not yet exist
if not util.make_directory( save_dir):
log( 'Could not create save_dir: %s', save_dir, sender = self)
return save_dir
save_dir = property(fget=get_save_dir)
def remove_downloaded( self):
shutil.rmtree( self.save_dir, True)
@property
def cover_file(self):
new_name = os.path.join(self.save_dir, 'folder.jpg')
if not os.path.exists(new_name):
old_names = ('cover', '.cover')
for old_name in old_names:
filename = os.path.join(self.save_dir, old_name)
if os.path.exists(filename):
shutil.move(filename, new_name)
return new_name
return new_name
def delete_episode(self, episode):
filename = episode.local_filename(create=False, check_only=True)
if filename is not None:
util.delete_file(filename)
episode.set_state(gpodder.STATE_DELETED)
class PodcastEpisode(PodcastModelObject):
"""holds data for one object in a channel"""
MAX_FILENAME_LENGTH = 200
def _get_played(self):
return self.is_played
def _set_played(self, played):
self.is_played = played
# Alias "is_played" to "played" for DB column mapping
played = property(fget=_get_played, fset=_set_played)
def _get_locked(self):
return self.is_locked
def _set_locked(self, locked):
self.is_locked = locked
# Alias "is_locked" to "locked" for DB column mapping
locked = property(fget=_get_locked, fset=_set_locked)
def _get_channel_id(self):
return self.channel.id
def _set_channel_id(self, channel_id):
assert self.channel.id == channel_id
# Accessor for the "channel_id" DB column
channel_id = property(fget=_get_channel_id, fset=_set_channel_id)
@staticmethod
def sort_by_pubdate(episodes, reverse=False):
"""Sort a list of PodcastEpisode objects chronologically
        Returns an iterable, sorted sequence of the episodes
"""
key_pubdate = lambda e: e.pubDate
return sorted(episodes, key=key_pubdate, reverse=reverse)
def reload_from_db(self):
"""
Re-reads all episode details for this object from the
database and updates this object accordingly. Can be
used to refresh existing objects when the database has
been updated (e.g. the filename has been set after a
download where it was not set before the download)
"""
d = self.db.load_episode(self.id)
self.update_from_dict(d or {})
return self
def has_website_link(self):
return bool(self.link) and (self.link != self.url)
@staticmethod
def from_feedparser_entry(entry, channel):
episode = PodcastEpisode(channel)
episode.title = entry.get('title', '')
episode.link = entry.get('link', '')
episode.description = entry.get('summary', '')
        # Fall back to the subtitle if the summary is not available
if not episode.description:
episode.description = entry.get('subtitle', '')
episode.guid = entry.get('id', '')
if entry.get('updated_parsed', None):
episode.pubDate = rfc822.mktime_tz(entry.updated_parsed+(0,))
enclosures = entry.get('enclosures', ())
audio_available = any(e.get('type', '').startswith('audio/') \
for e in enclosures)
video_available = any(e.get('type', '').startswith('video/') \
for e in enclosures)
# Enclosures
for e in enclosures:
episode.mimetype = e.get('type', 'application/octet-stream')
if episode.mimetype == '':
# See Maemo bug 10036
log('Fixing empty mimetype in ugly feed', sender=episode)
episode.mimetype = 'application/octet-stream'
if '/' not in episode.mimetype:
continue
# Skip images in feeds if audio or video is available (bug 979)
if episode.mimetype.startswith('image/') and \
(audio_available or video_available):
continue
episode.url = util.normalize_feed_url(e.get('href', ''))
if not episode.url:
continue
try:
episode.length = int(e.length) or -1
except:
episode.length = -1
return episode
# Media RSS content
for m in entry.get('media_content', ()):
episode.mimetype = m.get('type', 'application/octet-stream')
if '/' not in episode.mimetype:
continue
episode.url = util.normalize_feed_url(m.get('url', ''))
if not episode.url:
continue
try:
episode.length = int(m.fileSize) or -1
except:
episode.length = -1
return episode
# Brute-force detection of any links
for l in entry.get('links', ()):
episode.url = util.normalize_feed_url(l.get('href', ''))
if not episode.url:
continue
if youtube.is_video_link(episode.url):
return episode
# Check if we can resolve this link to a audio/video file
filename, extension = util.filename_from_url(episode.url)
file_type = util.file_type_by_extension(extension)
if file_type is None and hasattr(l, 'type'):
extension = util.extension_from_mimetype(l.type)
file_type = util.file_type_by_extension(extension)
# The link points to a audio or video file - use it!
if file_type is not None:
return episode
# Scan MP3 links in description text
mp3s = re.compile(r'http://[^"]*\.mp3')
for content in entry.get('content', ()):
html = content.value
for match in mp3s.finditer(html):
episode.url = match.group(0)
return episode
return None
def __init__(self, channel):
self.db = channel.db
# Used by Storage for faster saving
self.id = None
self.url = ''
self.title = ''
self.length = 0
self.mimetype = 'application/octet-stream'
self.guid = ''
self.description = ''
self.link = ''
self.channel = channel
self.pubDate = 0
self.filename = None
self.auto_filename = 1 # automatically generated filename
self.state = gpodder.STATE_NORMAL
self.is_played = False
# Initialize the "is_locked" property
self._is_locked = False
self.is_locked = channel.channel_is_locked
# Time attributes
self.total_time = 0
self.current_position = 0
self.current_position_updated = time.time()
def get_is_locked(self):
return self._is_locked
def set_is_locked(self, is_locked):
self._is_locked = bool(is_locked)
is_locked = property(fget=get_is_locked, fset=set_is_locked)
def save(self):
if self.state != gpodder.STATE_DOWNLOADED and self.file_exists():
self.state = gpodder.STATE_DOWNLOADED
self.db.save_episode(self)
def on_downloaded(self, filename):
self.state = gpodder.STATE_DOWNLOADED
self.is_played = False
self.length = os.path.getsize(filename)
self.db.save_downloaded_episode(self)
self.db.commit()
def set_state(self, state):
self.state = state
self.db.update_episode_state(self)
def mark(self, state=None, is_played=None, is_locked=None):
if state is not None:
self.state = state
if is_played is not None:
self.is_played = is_played
if is_locked is not None:
self.is_locked = is_locked
self.db.update_episode_state(self)
@property
def title_markup(self):
return '%s\n<small>%s</small>' % (xml.sax.saxutils.escape(self.title),
xml.sax.saxutils.escape(self.channel.title))
@property
def maemo_markup(self):
if self.length > 0:
length_str = '%s; ' % self.filesize_prop
else:
length_str = ''
return ('<b>%s</b>\n<small>%s'+_('released %s')+ \
'; '+_('from %s')+'</small>') % (\
xml.sax.saxutils.escape(self.title), \
xml.sax.saxutils.escape(length_str), \
xml.sax.saxutils.escape(self.pubdate_prop), \
xml.sax.saxutils.escape(self.channel.title))
@property
def maemo_remove_markup(self):
if self.is_played:
played_string = _('played')
else:
played_string = _('unplayed')
downloaded_string = self.get_age_string()
if not downloaded_string:
downloaded_string = _('today')
return ('<b>%s</b>\n<small>%s; %s; '+_('downloaded %s')+ \
'; '+_('from %s')+'</small>') % (\
xml.sax.saxutils.escape(self.title), \
xml.sax.saxutils.escape(self.filesize_prop), \
xml.sax.saxutils.escape(played_string), \
xml.sax.saxutils.escape(downloaded_string), \
xml.sax.saxutils.escape(self.channel.title))
def age_in_days(self):
return util.file_age_in_days(self.local_filename(create=False, \
check_only=True))
def get_age_string(self):
return util.file_age_to_string(self.age_in_days())
age_prop = property(fget=get_age_string)
def one_line_description( self):
lines = util.remove_html_tags(self.description).strip().splitlines()
if not lines or lines[0] == '':
return _('No description available')
else:
return ' '.join(lines)
def delete_from_disk(self):
try:
self.channel.delete_episode(self)
except:
log('Cannot delete episode from disk: %s', self.title, traceback=True, sender=self)
def find_unique_file_name(self, url, filename, extension):
current_try = util.sanitize_filename(filename, self.MAX_FILENAME_LENGTH)+extension
next_try_id = 2
lookup_url = None
if self.filename == current_try and current_try is not None:
# We already have this filename - good!
return current_try
while self.db.episode_filename_exists(current_try):
current_try = '%s (%d)%s' % (filename, next_try_id, extension)
next_try_id += 1
return current_try
def local_filename(self, create, force_update=False, check_only=False,
template=None):
"""Get (and possibly generate) the local saving filename
Pass create=True if you want this function to generate a
new filename if none exists. You only want to do this when
planning to create/download the file after calling this function.
Normally, you should pass create=False. This will only
create a filename when the file already exists from a previous
version of gPodder (where we used md5 filenames). If the file
does not exist (and the filename also does not exist), this
function will return None.
If you pass force_update=True to this function, it will try to
find a new (better) filename and move the current file if this
is the case. This is useful if (during the download) you get
more information about the file, e.g. the mimetype and you want
to include this information in the file name generation process.
If check_only=True is passed to this function, it will never try
        to rename the file, even if it would be a good idea. Use this if you
only want to check if a file exists.
If "template" is specified, it should be a filename that is to
be used as a template for generating the "real" filename.
The generated filename is stored in the database for future access.
"""
ext = self.extension(may_call_local_filename=False).encode('utf-8', 'ignore')
# For compatibility with already-downloaded episodes, we
# have to know md5 filenames if they are downloaded already
urldigest = hashlib.md5(self.url).hexdigest()
if not create and self.filename is None:
urldigest_filename = os.path.join(self.channel.save_dir, urldigest+ext)
if os.path.exists(urldigest_filename):
# The file exists, so set it up in our database
log('Recovering pre-0.15.0 file: %s', urldigest_filename, sender=self)
self.filename = urldigest+ext
self.auto_filename = 1
self.save()
return urldigest_filename
return None
# We only want to check if the file exists, so don't try to
# rename the file, even if it would be reasonable. See also:
# http://bugs.gpodder.org/attachment.cgi?id=236
if check_only:
if self.filename is None:
return None
else:
return os.path.join(self.channel.save_dir, self.filename)
if self.filename is None or force_update or (self.auto_filename and self.filename == urldigest+ext):
# Try to find a new filename for the current file
if template is not None:
# If template is specified, trust the template's extension
episode_filename, ext = os.path.splitext(template)
else:
episode_filename, extension_UNUSED = util.filename_from_url(self.url)
fn_template = util.sanitize_filename(episode_filename, self.MAX_FILENAME_LENGTH)
if 'redirect' in fn_template and template is None:
# This looks like a redirection URL - force URL resolving!
log('Looks like a redirection to me: %s', self.url, sender=self)
url = util.get_real_url(self.channel.authenticate_url(self.url))
log('Redirection resolved to: %s', url, sender=self)
(episode_filename, extension_UNUSED) = util.filename_from_url(url)
fn_template = util.sanitize_filename(episode_filename, self.MAX_FILENAME_LENGTH)
# Use the video title for YouTube downloads
for yt_url in ('http://youtube.com/', 'http://www.youtube.com/'):
if self.url.startswith(yt_url):
fn_template = util.sanitize_filename(os.path.basename(self.title), self.MAX_FILENAME_LENGTH)
# If the basename is empty, use the md5 hexdigest of the URL
if len(fn_template) == 0 or fn_template.startswith('redirect.'):
log('Report to bugs.gpodder.org: Podcast at %s with episode URL: %s', self.channel.url, self.url, sender=self)
fn_template = urldigest
# Find a unique filename for this episode
wanted_filename = self.find_unique_file_name(self.url, fn_template, ext)
# We populate the filename field the first time - does the old file still exist?
if self.filename is None and os.path.exists(os.path.join(self.channel.save_dir, urldigest+ext)):
log('Found pre-0.15.0 downloaded file: %s', urldigest, sender=self)
self.filename = urldigest+ext
# The old file exists, but we have decided to want a different filename
if self.filename is not None and wanted_filename != self.filename:
# there might be an old download folder crawling around - move it!
new_file_name = os.path.join(self.channel.save_dir, wanted_filename)
old_file_name = os.path.join(self.channel.save_dir, self.filename)
if os.path.exists(old_file_name) and not os.path.exists(new_file_name):
log('Renaming %s => %s', old_file_name, new_file_name, sender=self)
os.rename(old_file_name, new_file_name)
elif force_update and not os.path.exists(old_file_name):
# When we call force_update, the file might not yet exist when we
# call it from the downloading code before saving the file
log('Choosing new filename: %s', new_file_name, sender=self)
else:
log('Warning: %s exists or %s does not.', new_file_name, old_file_name, sender=self)
log('Updating filename of %s to "%s".', self.url, wanted_filename, sender=self)
elif self.filename is None:
log('Setting filename to "%s".', wanted_filename, sender=self)
else:
log('Should update filename. Stays the same (%s). Good!', \
wanted_filename, sender=self)
self.filename = wanted_filename
self.save()
self.db.commit()
return os.path.join(self.channel.save_dir, self.filename)
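    # Typical call sites for local_filename(), mirroring the docstring above (sketch):
    #
    #     path = episode.local_filename(create=True)          # about to download the file
    #     exists = episode.local_filename(create=False,
    #                                     check_only=True) is not None   # existence check only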
def set_mimetype(self, mimetype, commit=False):
"""Sets the mimetype for this episode"""
self.mimetype = mimetype
if commit:
self.db.commit()
def extension(self, may_call_local_filename=True):
filename, ext = util.filename_from_url(self.url)
if may_call_local_filename:
filename = self.local_filename(create=False)
if filename is not None:
filename, ext = os.path.splitext(filename)
# if we can't detect the extension from the url fallback on the mimetype
if ext == '' or util.file_type_by_extension(ext) is None:
ext = util.extension_from_mimetype(self.mimetype)
return ext
def check_is_new(self, downloading=lambda e: False):
"""
Returns True if this episode is to be considered new.
"Downloading" should be a callback that gets an episode
as its parameter and returns True if the episode is
being downloaded at the moment.
"""
return self.state == gpodder.STATE_NORMAL and \
not self.is_played and \
not downloading(self)
def mark_new(self):
self.state = gpodder.STATE_NORMAL
self.is_played = False
self.db.update_episode_state(self)
def mark_old(self):
self.is_played = True
self.db.update_episode_state(self)
def file_exists(self):
filename = self.local_filename(create=False, check_only=True)
if filename is None:
return False
else:
return os.path.exists(filename)
def was_downloaded(self, and_exists=False):
if self.state != gpodder.STATE_DOWNLOADED:
return False
if and_exists and not self.file_exists():
return False
return True
def sync_filename(self, use_custom=False, custom_format=None):
if use_custom:
return util.object_string_formatter(custom_format,
episode=self, podcast=self.channel)
else:
return self.title
def file_type(self):
# Assume all YouTube links are video files
if youtube.is_video_link(self.url):
return 'video'
return util.file_type_by_extension(self.extension())
@property
def basename( self):
return os.path.splitext( os.path.basename( self.url))[0]
@property
def published( self):
"""
Returns published date as YYYYMMDD (or 00000000 if not available)
"""
try:
return datetime.datetime.fromtimestamp(self.pubDate).strftime('%Y%m%d')
except:
log( 'Cannot format pubDate for "%s".', self.title, sender = self)
return '00000000'
@property
def pubtime(self):
"""
Returns published time as HHMM (or 0000 if not available)
"""
try:
return datetime.datetime.fromtimestamp(self.pubDate).strftime('%H%M')
except:
log('Cannot format pubDate (time) for "%s".', self.title, sender=self)
return '0000'
def cute_pubdate(self):
result = util.format_date(self.pubDate)
if result is None:
return '(%s)' % _('unknown')
else:
return result
pubdate_prop = property(fget=cute_pubdate)
def calculate_filesize( self):
filename = self.local_filename(create=False)
if filename is None:
log('calculate_filesized called, but filename is None!', sender=self)
try:
self.length = os.path.getsize(filename)
except:
log( 'Could not get filesize for %s.', self.url)
def get_filesize_string(self):
return util.format_filesize(self.length)
filesize_prop = property(fget=get_filesize_string)
def get_played_string( self):
if not self.is_played:
return _('Unplayed')
return ''
played_prop = property(fget=get_played_string)
def is_duplicate(self, episode):
if self.title == episode.title and self.pubDate == episode.pubDate:
log('Possible duplicate detected: %s', self.title)
return True
return False
def duplicate_id(self):
return hash((self.title, self.pubDate))
def update_from(self, episode):
for k in ('title', 'url', 'description', 'link', 'pubDate', 'guid'):
setattr(self, k, getattr(episode, k))
| gpl-3.0 | 8,254,401,442,655,830,000 | 37.606191 | 140 | 0.596093 | false | 4.053354 | false | false | false |
renatopp/aerolito | setup.py | 1 | 1829 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Renato de Pontes Pereira'
__author_email__ = '[email protected]'
__version__ = '0.1'
__date__ = '2011 10 15'
try:
import setuptools
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
long_description = '''
Aerolito is an AIML alternative based on YAML. Aerolito provides features
for natural language processing simulation. Example of usage::
from aerolito import Kernel
kernel = Kernel('config.yml')
print kernel.respond(u'Hello')
'''
setup(
name='aerolito',
version = __version__,
author = __author__,
author_email=__author_email__,
license='MIT License',
url='http://renatopp.com/aerolito',
download_url='https://github.com/renatopp/aerolito/',
description='Python library for natural language processing simulation',
long_description=long_description,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: MacOS X',
'Environment :: Win32 (MS Windows)',
'Environment :: X11 Applications',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup'
],
keywords='artificial intelligence natural language processing simulation yaml aiml markup aerolito',
packages=['aerolito'],
install_requires=['pyyaml'],
)
| mit | -8,713,495,379,147,179,000 | 31.087719 | 104 | 0.65883 | false | 4.04646 | false | false | false |
apdjustino/DRCOG_Urbansim | src/drcog/models/gwr_hedonic.py | 1 | 19704 | import numpy as np, pandas as pd, os
from synthicity.utils import misc
from drcog.models import dataset
dset = dataset.DRCOGDataset(os.path.join(misc.data_dir(),'drcog.h5'))
np.random.seed(1)
import statsmodels.api as sm
#import pygwr_kernel
import random
"""
This program estimates an hedonic model for prices of residential and non-residential buildings. The benchmark method
combines:
1/ A geographically weighted regression to account for spatial non-stationarity
2/ Poisson or Negative Binonial General Linear Model to estimate a log-linear model with heteroskedastic error terms
3/ Zone employment (later-on when the data is fixed, zone average income or household characteristics)
is instrumented with average buildings characteristics in neighboring zones.
The program is organized in four parts:
1/ Create a dataset for estimation
2/ Run the first stage least squares (average zonal employment regressed on county fixed effect and
neighboring zones characteristics). The predicted zonal employment is used as an instrument in all following regressions
3/ Run a GLM GWR methods and obtain local hedonoc parameters.
4/ Generate average coefficient for each zone
"""
## Part 1: extract variables and build dataset for estimation
def data_estimation(dset, buildings,parcels,fars,zones,establishments, bid):
bp=buildings
p=parcels
f=fars
z=zones
e=establishments
print bp.columns
## Construct additional buildings variables related to zone or parcel characteristics
bp['zone_id']= p.zone_id[bp.parcel_id].values
bp['dist_bus']= (p.dist_bus[bp.parcel_id].values)
bp['ln_dist_bus']=np.log(bp['dist_bus'])
bp['dist_rail']= (p.dist_rail[bp.parcel_id].values)
bp['ln_dist_rail']=np.log(bp['dist_rail'])
bp['county_id']= p.county_id[bp.parcel_id].values
bp['centroid_x']= p.centroid_x[bp.parcel_id].values
bp['centroid_y']= p.centroid_y[bp.parcel_id].values
bp['year20']=pd.DataFrame((bp['year_built']>1995)*(bp['year_built']<2000)).applymap(lambda x: 1 if x else 0)
bp['year10']=pd.DataFrame((bp['year_built']>1999)*(bp['year_built']<2006)).applymap(lambda x: 1 if x else 0)
bp['year0']=pd.DataFrame((bp['year_built']>2005)).applymap(lambda x: 1 if x else 0)
f['far_id']=f.index
p['far']=f.far[p.far_id].values
bp['far']= p.far[bp.parcel_id].values
bp['high_land_area']=pd.DataFrame((bp['land_area']>7000)).applymap(lambda x: 1 if x else 0)
bp['ln_nres_sqft']=np.log(bp['non_residential_sqft'])
bp['ln_res_sqft']=np.log(bp['bldg_sq_ft'])
bp['unit_price_res_sqft']=bp[bp['bldg_sq_ft']>0]['unit_price_residential']/bp['bldg_sq_ft']
### neighborhood (right now zone , later on, use a kernel) characteristics
e['zone_id'] = bp.zone_id[e.building_id].values
bp['building_id']=bp.index
u=pd.DataFrame(e.groupby('building_id').sector_id.sum())
u.columns=['sector_id']
bp=pd.merge(bp,u, left_on='building_id', right_index=True, how='outer')
z['zone_id']=z.index
z['far']=p.far[z.zone_id].values
z['sector_id_5']=bp[bp['building_type_id']==5].groupby('zone_id').sector_id.sum()
z['sector_id_5_out']=dset.compute_range( z['sector_id_5'],15, agg=np.sum)-\
dset.compute_range( z['sector_id_5'],5, agg=np.sum)
z['sector_id_22']=bp[bp['building_type_id']==22].groupby('zone_id').sector_id.sum()
z['sector_id_22_out']=dset.compute_range( z['sector_id_22'],15, agg=np.sum)-\
dset.compute_range( z['sector_id_22'],5, agg=np.sum)
z['sector_id_18']=bp[bp['building_type_id']==18].groupby('zone_id').sector_id.sum()
z['sector_id_18_out']=dset.compute_range( z['sector_id_18'],15, agg=np.sum)-\
dset.compute_range( z['sector_id_18'],5, agg=np.sum)
z['emp_sector_mean'] = e.groupby('zone_id').employees.mean()
z['emp_sector_agg'] = e.groupby('zone_id').employees.sum()
z['nr_sqft_mean']=bp.groupby('zone_id').non_residential_sqft.mean()
z['nr_price_mean']=bp.groupby('zone_id').unit_price_non_residential.mean()
z['r_sqft_mean']=bp[(bp['building_type_id']==bid)*(bp['bldg_sq_ft']>0)].groupby('zone_id').ln_res_sqft.sum()
z['ln_r_sqft_mean']=np.log( z['r_sqft_mean'])
z['nr_sqft_agg']=bp.groupby('zone_id').non_residential_sqft.sum()
z['nr_stories_mean']=bp.groupby('zone_id').stories.sum()
z['year0_mean']=bp.groupby('zone_id').year0.mean()
z['nr_sqft_30_10']=dset.compute_range(z['nr_sqft_mean'], 30)- z['nr_sqft_mean']
z['r_sqft_30_10']=dset.compute_range(z[np.isfinite(z['ln_r_sqft_mean'])]['ln_r_sqft_mean'], 15, agg=np.sum)- \
dset.compute_range(z[np.isfinite(z['ln_r_sqft_mean'])]['ln_r_sqft_mean'], 5, agg=np.sum)
z['far_30_10']=dset.compute_range(z[np.isfinite(z['far'])]['far'], 15, agg=np.mean)- \
dset.compute_range(z[np.isfinite(z['far'])]['far'], 5, agg=np.mean)
#z['ln_r_sqft_mean']
z['stories_30_10']=dset.compute_range(z['nr_stories_mean'], 15)-\
dset.compute_range(z['nr_stories_mean'], 5, agg=np.sum)
z['nr_year0_30_10']=dset.compute_range(z['year0_mean'], 30)- z['year0_mean']
# Larger Area Characteristics
z['emp_sector_mean_30']=dset.compute_range(z['emp_sector_mean'], 30, agg=np.mean)
z['nr_sqft_30']=dset.compute_range(z['nr_sqft_mean'], 15,agg=np.mean)
z['r_sqft_30']=dset.compute_range(z['ln_r_sqft_mean'], 15, agg=np.sum)
bp['emp_sector_mean_30']=z.emp_sector_mean_30[bp.zone_id].values
bp['emp_sector_10']=z.emp_sector_mean[bp.zone_id].values
bp['year0_10']=z.year0_mean[bp.zone_id].values
bp['stories_10']=z.nr_stories_mean[bp.zone_id].values
bp['nr_sqft_30_10']=z.nr_sqft_30_10[bp.zone_id].values
bp['stories_30_10']=z.stories_30_10[bp.zone_id].values
bp['nr_year0_30_10']=z.nr_year0_30_10[bp.zone_id].values
bp['nr_sqft_10']=z.nr_sqft_mean[bp.zone_id].values
bp['nr_price_10']=z.nr_price_mean[bp.zone_id].values
bp['nr_sqft_30']=z.nr_sqft_30[bp.zone_id].values
bp['r_sqft_30_10']=z.r_sqft_30_10[bp.zone_id].values
bp['r_sqft_10']=z.r_sqft_mean[bp.zone_id].values
bp['r_sqft_30']=z.r_sqft_30[bp.zone_id].values
#bp['nr_sqft_agg']=z.nr_sqft_agg[bp.zone_id].values
bp['ln_nr_sqft_30_10']=np.log(bp['nr_sqft_30_10'])
bp['ln_nr_sqft_30']=np.log(bp['nr_sqft_30'])
bp['ln_nr_sqft_10']=np.log(bp['nr_sqft_10'])
bp['ln_r_sqft_30_10']=np.log(bp['r_sqft_30_10'])
bp['ln_r_sqft_30']=np.log(bp['r_sqft_30'])
bp['ln_r_sqft_10']=np.log(bp['r_sqft_10'])
bp['ln_emp_30']=np.log(bp['emp_sector_mean_30'])
bp['ln_emp_10']=np.log(bp['emp_sector_10'])
bp['ln_sqft_zone']=-np.log(bp.bldg_sq_ft)+bp['r_sqft_10']
bp['ln_sqft_out']=-np.log(bp.bldg_sq_ft)+bp['r_sqft_30']
bp['ln_stories_zone']=-bp['stories']+bp['stories_10']
bp['ln_sqft_out']=-np.log(bp.bldg_sq_ft)+bp['r_sqft_30_10']
bp['ln_stories_out']=bp['stories_30_10']
bp['sector_id_5']=z.sector_id_5[bp.zone_id].values
bp['sector_id_5_out']=z.sector_id_5_out[bp.zone_id].values
bp['sector_id_18']=z.sector_id_18[bp.zone_id].values
bp['sector_id_18_out']=z.sector_id_18_out[bp.zone_id].values
bp['sector_id_22']=z.sector_id_22[bp.zone_id].values
bp['sector_id_22_out']=z.sector_id_22_out[bp.zone_id].values
#bp=bp[bp['building_type_id']==bid]
del e
del p
dset.d['buildings']=bp
dset.d['zones']=z
return dset
# Part1bis:income and age
def income_data(dset):
df_marg=pd.read_csv('C:\Users\XGitiaux\Documents\Tableau\Census/inc_age_marg.csv', index_col='zone_id')
df_price=pd.read_csv('C:\Users\XGitiaux\Documents\Tableau\Census/ZillowPriceZone.csv')
df_marg['zone_id']=df_marg.index
print df_marg[df_marg['zone_id']==1609]['zone_id']
df_marg=pd.merge(df_price, df_marg, left_on='zone_id', right_index=True)
df_marg.index=df_marg['zone_id']
df_marg.fillna(0, inplace=True)
df=pd.DataFrame(df_marg.index, index=df_marg.index)
print df_marg.PriceZ
df['low_income_25_44']=df_marg['Householder 25 to 44 years:Less than $10,000']+df_marg['Householder 25 to 44 years:$10,000 to $14,999']\
+df_marg[ 'Householder 25 to 44 years:$15,000 to $19,999']\
+df_marg['Householder 25 to 44 years:$20,000 to $24,999']+\
df_marg['Householder 25 to 44 years:$25,000 to $29,999']
df['low_income_45_64']=df_marg['Householder 45 to 64 years:Less than $10,000']+\
df_marg['Householder 45 to 64 years:$10,000 to $14,999']\
+df_marg[ 'Householder 45 to 64 years:$15,000 to $19,999']\
+df_marg['Householder 45 to 64 years:$20,000 to $24,999']+\
df_marg['Householder 45 to 64 years:$25,000 to $29,999']
df['low_income_65']=df_marg['Householder 65 years and over:Less than $10,000']+\
df_marg['Householder 65 years and over:$10,000 to $14,999']\
+df_marg[ 'Householder 65 years and over:$15,000 to $19,999']\
+df_marg['Householder 65 years and over:$20,000 to $24,999']+\
df_marg['Householder 65 years and over:$25,000 to $29,999']
df['high_income_25_44']=df_marg['Householder 25 to 44 years:$100,000 to $124,999']+\
df_marg['Householder 25 to 44 years:$125,000 to $149,999']+\
df_marg['Householder 25 to 44 years:$150,000 to $199,999']+\
df_marg['Householder 25 to 44 years:$200,000 or more']+\
df_marg['Householder 25 to 44 years:$60,000 to $74,999']+\
df_marg['Householder 25 to 44 years:$75,000 to $99,999']
df['high_income_45_64']=df_marg['Householder 45 to 64 years:$100,000 to $124,999']+\
df_marg['Householder 45 to 64 years:$125,000 to $149,999']+\
df_marg['Householder 45 to 64 years:$150,000 to $199,999']+\
df_marg['Householder 45 to 64 years:$200,000 or more']+\
df_marg['Householder 45 to 64 years:$60,000 to $74,999']+\
df_marg['Householder 45 to 64 years:$75,000 to $99,999']
df['high_income_65']=df_marg['Householder 65 years and over:$100,000 to $124,999']+\
df_marg['Householder 65 years and over:$125,000 to $149,999']+\
df_marg['Householder 65 years and over:$150,000 to $199,999']+\
df_marg['Householder 65 years and over:$200,000 or more']+\
df_marg['Householder 65 years and over:$60,000 to $74,999']+\
df_marg['Householder 65 years and over:$75,000 to $99,999']
# Create a csv file for Tableau
print dset[(dset['bldg_sq_ft']>0)*(dset['building_type_id']==20)]['unit_price_res_sqft']
dset=pd.merge(dset, df, left_on='zone_id', right_index=True, how='outer')
df['price_bid=20']=df_marg['PriceZ']
print dset[(dset['building_type_id']==20)*(np.isfinite(dset['unit_price_res_sqft']))*(dset['county_id']==8035)].groupby('zone_id').unit_price_res_sqft.mean()
df['zone_id']=df.index
df=df[np.isfinite(df['price_bid=20'])]
df.to_csv('C:\Users\XGitiaux\Documents\Tableau\Census/UBSprice_income5.csv')
return dset
## Part 2: Instrument for employment
def instrument(dset, instrumented, instr, ind_vars):
print "Step: Instrument Variables"
### Make sure there is no nan
z=dset
for varname in instrumented:
z=z[np.isfinite(z[varname])]
for varname in instr:
z=z[np.isfinite(z[varname])]
for varname in ind_vars:
z=z[np.isfinite(z[varname])]
### Independent variables including fixed effects
#x=pd.get_dummies(z['county_id'])
x=pd.DataFrame(index=z.index)
for varname in ind_vars:
x[varname]=z[varname]
for varname in instr:
x[varname]=z[varname]
x=sm.add_constant(x,prepend=False)
### Dependent Variables
y=pd.DataFrame(z[instrumented])
print len(y)
print len(x)
### Regression
regression_results=sm.OLS(y,x).fit()
print regression_results.summary()
### Return the instrument
out=pd.DataFrame(z.index)
for varname in instrumented:
out[varname+"_iv"]=regression_results.predict()
return out
## Part 3: main regression using GWR
def global_hedonic(dset,depvar, ind_vars, bid, instrumented=None, instr=None, dsetiv=None, ind_variv=None, fixed_effect=False):
### Instrument
#dsetiv=dsetiv[dsetiv['building_type_id']==bid]
for varname in instrumented:
out=instrument(dsetiv, instrumented, instr, ind_variv)
dset=pd.merge(dset, out, left_on='zone_id', right_index=True)
## Make sure there is no nan
b=dset[dset['building_type_id']==bid]
for varname in instrumented:
b=b[np.isfinite(b[varname])]
b=b[~np.isnan(b[varname])]
for varname in ind_vars:
b=b[np.isfinite(b[varname])]
b=b[~np.isnan(b[varname])]
for varname in depvar:
b=b[np.isfinite(b[varname])]
b=b[~np.isnan(b[varname])]
### Independent variables including fixed effects
if fixed_effect==True:
x=pd.get_dummies(b.county_id)
x['zone_id']=b['zone_id']
x=sm.add_constant(x,prepend=False)
else:
x=pd.DataFrame(b.zone_id)
print b
for varname in ind_vars:
x[varname]=b[varname]
### Adding Instrument
if len(instrumented)*len(instr)* len(dsetiv)*len(ind_variv)!=0:
for varname in instrumented:
x[varname]=b[varname+"_iv"]
else:
for varname in instrumented:
x[varname]=b[varname]
x=sm.add_constant(x,prepend=False)
del x['zone_id']
print b['ln_stories_out']
### Dependent Variables
y=pd.DataFrame(b[depvar])
### Regression
print x
print y
#regression_results=sm.GLM(y,x, family=sm.families.NegativeBinomial()).fit()
regression_results=sm.OLS(y,x).fit()
out_parm=(regression_results.params).T
print out_parm
print regression_results.summary()
#### Coefficient
out=pd.DataFrame(index=b.index)
i=0
for varname in list(x.columns.values):
out[varname]=out_parm[i]
i=i+1
out['zone_id']=b['zone_id']
print out
out.to_csv('C:\urbansim\output\global_coeff_'+str(bid)+'.csv')
return out
def kernel_hedonic(dset,depvar, ind_vars, bid, bandwidth, instrumented=None, instr=None, dsetiv=None, ind_variv=None, fixed_effect=False):
### Instrument
dsetiv=dsetiv[dsetiv['building_type_id']==bid]
for varname in instrumented:
out=instrument(dsetiv, instrumented, instr, ind_variv)
dset=pd.merge(dset, out, left_on='zone_id', right_index=True)
## Make sure there is no nan
b=dset[dset['building_type_id']==bid]
for varname in instrumented:
b=b[np.isfinite(b[varname])]
for varname in ind_vars:
b=b[np.isfinite(b[varname])]
for varname in depvar:
b=b[np.isfinite(b[varname])]
### Independent variables including fixed effects
if fixed_effect==True:
x=pd.get_dummies(b.county_id)
else:
x=pd.DataFrame(index=b.index)
#x=sm.add_constant(x,prepend=False)
for varname in ind_vars:
x[varname]=b[varname]
### Adding Instrument
if len(instrumented)*len(instr)* len(dsetiv)*len(ind_variv)!=0:
for varname in instrumented:
x[varname]=b[varname+"_iv"]
else:
for varname in instrumented:
x[varname]=b[varname]
### Dependent Variables
print b[depvar]
y=pd.DataFrame(b[depvar])
### Locations
g=pd.DataFrame(b['centroid_x'])
g['centroid_y']=b['centroid_y']
### GWR
y=np.array(y,dtype=np.float64 )
xv=np.array(x,dtype=np.float64 )
g=np.array(g,dtype=np.float64 )
model = pygwr_kernel.GWR(targets=y, samples=xv, locations=g)
print "Estimating GWR model at all data points..."
gwr_predict,gwr_parm = model.estimate_at_target_locations(bandwidth)
print gwr_predict
### Report coefficients
out=pd.DataFrame(index=b.index)
i=0
for varname in list(x.columns.values):
out[varname]=gwr_parm[:,i]
i=i+1
out['const']=gwr_parm[:,i]
out['zone_id']=b['zone_id']
out.to_csv('C:\urbansim\output\coeff_'+str(bid)+'.csv')
return out
def estimate_hedonic(dset,depvar, ind_vars, bid, bandwidth, instrumented=None, instr=None, dsetiv=None, ind_variv=None, fixed_effect=False):
if bandwidth!=0:
for i in bid:
kernel_hedonic(dset,depvar, ind_vars, i, bandwidth, instrumented, instr, dsetiv, ind_variv, fixed_effect)
else:
for i in bid:
dset[depvar]=np.log(dset[depvar])
global_hedonic(dset, depvar, ind_vars, i, instrumented, instr, dsetiv, ind_variv, fixed_effect)
"""
ind_vars=['stories', 'ln_nres_sqft', 'high_land_area', 'year0', 'year10','year20', 'ln_dist_bus', 'far', 'ln_emp_30',
'ln_nr_sqft_10', 'ln_nr_sqft_30', 'ln_emp_10']
ind_vars2=[ 'ln_nr_sqft_10', 'ln_nr_sqft_30', 'ln_emp_30', ]
dset=data_estimation(dset,dset.buildings, dset.parcels, dset.fars, dset.zones, dset.establishments, 20)
b=dset.buildings
b=income_data(b)
b=b[b['unit_price_res_sqft']>0]
#Randomly hold back 25 % of the sample
#b=b.ix[random.sample(b.index, int(len(b)))]
z=dset.zones
b.drop_duplicates(['stories', 'ln_res_sqft', 'high_land_area', 'year0', 'year10','year20', 'ln_dist_bus', 'far', 'ln_emp_30',
'ln_nr_sqft_10', 'ln_nr_sqft_30', 'ln_emp_10'], inplace=True)
#b=b[b['unit_price_non_residential']<10000]
ind_vars=['stories', 'ln_res_sqft', 'high_land_area', 'year0', 'year10','year20', 'ln_dist_bus','ln_sqft_zone',
'far', 'ln_sqft_out','ln_stories_zone', 'ln_stories_out', 'ln_emp_30','ln_emp_10','low_income_25_44', 'low_income_45_64',
'low_income_65', 'high_income_25_44', 'high_income_65', 'high_income_45_64']
ind_vars2=['stories', 'ln_res_sqft', 'high_land_area', 'year0', 'year10','year20', 'ln_dist_bus', 'ln_sqft_zone',
'ln_sqft_out', 'far', 'ln_stories_zone', 'ln_stories_out','low_income_25_44','low_income_25_44', 'low_income_45_64',
'low_income_65', 'high_income_25_44', 'high_income_65', 'high_income_45_64']
out=estimate_hedonic(b,['unit_price_residential'],ind_vars
, [20],0, instrumented=[], instr=['sector_id_5', 'sector_id_5_out','sector_id_18', 'sector_id_18_out',
'sector_id_22', 'sector_id_22_out', ]
, dsetiv=b, ind_variv=ind_vars2, fixed_effect=False)
"""
## Part 4: Create average hedonic coefficients for each zone. These coefficients will be used in simulations to compute
## average zone price. If there is no building in the zone, we use county level average.
def estimate_zone(dset, ind_vars, bid):
listc=[]
ind_vars=ind_vars+['const']
for b in bid:
df=pd.read_csv('C:/urbansim/output/global_coeff_'+str(b)+'.csv')
## Need county_id
p=pd.DataFrame(dset.parcels.zone_id)
p['county_id']=dset.parcels.county_id
list=[]
for x in ind_vars:
u=pd.DataFrame(df.groupby('zone_id')[x].mean())
u.columns=[x]
v=pd.merge(p,u,left_on='zone_id', right_index=True, how='outer')
## Use county average if no zone average
wc=pd.DataFrame(v[~np.isnan(v[x])].groupby('county_id')[x].mean())
wc.columns=['county_'+x]
v=pd.merge(v,wc,left_on='county_id', right_index=True)
v[x].fillna(v['county_'+x], inplace=True)
w=pd.DataFrame(v.groupby('zone_id')[x].mean())
w.columns=[x]
list.append(w)
coeff=pd.concat(list, axis=1)
coeff['zone_id']=coeff.index
coeff['bid']=b
listc.append(coeff)
coeff_dset=pd.concat(listc)
return coeff_dset
| agpl-3.0 | 8,107,236,233,041,884,000 | 37.484375 | 161 | 0.625355 | false | 2.775602 | false | false | false |
CZ-NIC/thug | src/Classifier/BaseClassifier.py | 5 | 2022 | #!/usr/bin/env python
#
# BaseClassifier.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
import os
import yara
import logging
from .abstractmethod import abstractmethod
log = logging.getLogger("Thug")
class BaseClassifier:
def __init__(self):
self._rules = dict()
self.matches = list()
self.namespace_id = 1
self.init_rules()
def init_rules(self):
p = getattr(self, 'default_rule_file', None)
if p is None:
log.warn("[%s] Skipping not existing default classification rule file" % (self.classifier, ))
return
r = os.path.join(os.path.dirname(os.path.abspath(__file__)), p)
if not os.path.exists(r):
log.warn("[%s] Skipping not existing default classification rule file" % (self.classifier, ))
return
self._rules['namespace0'] = r
self.rules = yara.compile(filepaths = self._rules)
def add_rule(self, rule_file):
if not os.path.exists(rule_file):
log.warn("[%s] Skipping not existing classification rule file %s" % (self.classifier, rule_file, ))
return
self._rules["namespace%s" % (self.namespace_id, )] = rule_file
self.namespace_id += 1
self.rules = yara.compile(filepaths = self._rules)
@abstractmethod
def classify(self):
pass
@property
def result(self):
return self.matches
| gpl-2.0 | -3,254,794,300,303,039,000 | 31.612903 | 111 | 0.652324 | false | 3.94152 | false | false | false |
JungWinter/HongikFood | app/menu.py | 1 | 9137 | from app import db
from .models import Menu, Poll
class PlaceMenu():
def __init__(self, place):
self.title = None # date + place
self.date = None
self.dayname = None
self.items = {
"아침": {
"정보": None,
"메뉴": [],
"평점": None,
},
"점심": {
"정보": None,
"메뉴": [],
"평점": None,
},
"저녁": {
"정보": None,
"메뉴": [],
"평점": None,
},
}
self.place = place
self.price = None
def test(self):
print("%s PlaceMenu TEST" % self.place)
print("title : %s" % self.title)
print("dayname : %s" % self.dayname)
print("date : %s" % self.date)
print("아침 정보 : %s" % self.items["아침"]["정보"])
print("점심 정보 : %s" % self.items["점심"]["정보"])
print("저녁 정보 : %s" % self.items["저녁"]["정보"])
print("아침 : %s" % " ".join(self.items["아침"]["메뉴"]))
print("점심 : %s" % " ".join(self.items["점심"]["메뉴"]))
print("저녁 : %s" % " ".join(self.items["저녁"]["메뉴"]))
def returnMenu(self, summary, time=None):
'''
최종 메시지의 형태
2016.11.11 금요일
□ 남문관 (3,500원)
■ 점심 (11:00-15:00)
수제탕수육
쌀밥
...
■ 저녁 (16:30-18:30)
제육볶음
쌀밥
...
'''
timelist = ["아침", "점심", "저녁"]
message = ""
# if not time:
# message += "{} {}\n".format(self.date, self.dayname)
if self.price == "":
message += "□ {}\n".format(self.place)
else:
message += "□ {} ({})\n".format(self.place, self.price)
# 메뉴 정보가 아예 없으면
if not any([self.items[t]["메뉴"] for t in timelist]):
message += "식단 정보가 없습니다.\n"
return message
for key in timelist:
if time and key != time:
continue
# 메뉴가 비어있으면 건너뛰기
if self.items[key]["메뉴"]:
if self.items[key]["정보"] == "":
if self.place == "남문관" and key == "아침":
message += "■ 점심: 한식\n"
elif self.place == "남문관" and key == "점심":
message += "■ 점심: 양식\n"
else:
message += "■ {}\n".format(key)
else:
if self.place == "남문관" and key == "아침":
message += "■ 점심: 한식 ({})\n".format(self.items[key]["정보"])
elif self.place == "남문관" and key == "점심":
message += "■ 점심: 양식 ({})\n".format(self.items[key]["정보"])
else:
message += "■ {} ({})\n".format(
key,
self.items[key]["정보"]
)
# 평점 붙여주기
message += "▶ " + self.items[key]["평점"] + "\n"
# 메뉴 붙여주기
menus = self.items[key]["메뉴"][:]
if summary:
# 쌀밥 제외
if "쌀밥" in menus:
menus.remove("쌀밥")
message += "\n".join(menus[:4]) + "\n"
else:
message += "\n".join(menus) + "\n"
return message
def updateDate(self, date):
self.dayname = date[0]
self.date = date[1]
def updateMenu(self, menu):
'''
menu의 길이가 2면 아침없음
3이면 아침있음
'''
time = ["저녁", "점심", "아침"]
reverseMenu = list(reversed(menu))
for index, item in enumerate(reverseMenu):
self.items[time[index]]["메뉴"] = item
menu = ",".join(item)
# m = DBAdmin.query(Menu, self.date, self.place, time[index])
m = Menu.query.filter_by(
date=self.date,
place=self.place,
time=time[index]).first()
if not m: # 결과값 없음
if item: # 빈 값이 아니면
# DBAdmin.addMenu(self.date, self.place, time[index], menu)
m = Menu(self.date, self.place, time[index], menu)
db.session.add(m)
db.session.commit()
else: # 결과값 있음
if m.menu != menu: # 비교해봐야지
m.menu = menu
# DBAdmin.commit()
db.session.commit()
def updateScore(self):
for time in self.items:
# self.items[time]
m = Menu.query.filter_by(
date=self.date,
place=self.place,
time=time
).first()
if m: # 결과값 있음
polls = Poll.query.filter_by(menu=m).all()
count = len(polls)
if count: # 0 이상임
scoreSum = sum(p.score for p in polls)
self.items[time]["평점"] = "%.1f / 5.0" % (scoreSum / count)
else:
self.items[time]["평점"] = "평가없음"
class DayMenu():
def __init__(self, dayname):
self.title = None # date + dayname
self.date = None
self.items = [
PlaceMenu("학생회관"),
PlaceMenu("남문관"),
PlaceMenu("교직원"),
PlaceMenu("신기숙사"),
# PlaceMenu("제1기숙사"),
]
self.dayname = dayname
info = [
# 학관 정보
"",
"11:00-14:00",
"17:00-19:00",
# 남문관 정보
"",
"11:30-14:00",
"17:30-19:00",
# 교직원 정보
"",
"11:30-14:20",
"17:00-19:20",
# 신기숙사 정보
"8:00-9:00",
"11:30-14:30",
"17:30-18:50"
]
time = ["아침", "점심", "저녁"]
price = ["3,900원", "3,900원", "6,000원", "3,900원"]
for place in self.items:
place.price = price.pop(0)
for t in time:
place.items[t]["정보"] = info.pop(0)
def returnAllMenu(self, summary):
message = "{} {}\n".format(self.date, self.dayname)
if summary:
message += "> 간추린 메뉴입니다.\n"
message += "> 쌀밥은 제외했습니다.\n"
for place in self.items:
message += place.returnMenu(summary=summary) + "\n"
if summary:
message += "\n오른쪽으로 넘기시면 다른 버튼도 있습니다.\n"
# 특정 메시지 전달 때 여기에 추가
# message += ""
return message
def returnPlaceMenu(self, place):
'''
search 함수도 필요할 듯
'''
name = ["학생회관", "남문관", "교직원", "신기숙사"]
message = self.items[name.index(place)].returnMenu(summary=False)
return message
def returnTimeMenu(self, time):
message = "{} {}\n".format(self.date, self.dayname)
for place in self.items:
message += place.returnMenu(summary=False, time=time) + "\n"
return message
def returnScore(self):
self.updateScore()
message = ""
times = ["아침", "점심", "저녁"]
for place in self.items:
for time in times:
if place.items[time]["메뉴"]:
message += "{} {} : {}\n".format(
place.place,
time,
place.items[time]["평점"]
)
return message
def updateSelf(self, date):
'''
아마 맞겠지만 그래도 검증
'''
if self.dayname == date[0]:
self.date = date[1]
return True
else:
return False
def updateScore(self):
for place in self.items:
place.updateScore()
def update(self, date, menu):
'''
받은 메뉴 쪼개기
하루에 총 10개고 4개로 나눠야함
2 / 3 / 2 / 3
'''
divMenu = []
divMenu.append([menu[0], menu[1]])
divMenu.append([menu[2], menu[3], menu[4]])
divMenu.append([menu[5], menu[6]])
divMenu.append([menu[7], menu[8], menu[9]])
if self.updateSelf(date):
for index, item in enumerate(self.items):
item.updateDate(date)
item.updateMenu(divMenu[index])
item.updateScore()
| mit | 2,723,474,008,122,156,500 | 30.522901 | 82 | 0.407676 | false | 2.709646 | false | false | false |
AllanSSX/IonNIPT | utils/coverageYspecificGenes.py | 1 | 4361 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#================================================================#
import os
import sys
import re
import json
import subprocess
#================================================================#
def main():
# Y linked genes
Y = {
"HSFY1" : "chrY:20708557-20750849",
#"HSFY2" : "chrY:20893326-20990548",
"BPY2" : "chrY:25119966-25151612",
"BPY2B" : "chrY:26753707-26785354",
"BPY2C" : "chrY:27177048-27208695",
"XKRY " : "chrY:19880860-19889280",
"PRY" : "chrY:24636544-24660784",
"PRY2" : "chrY:24217903-24242154"
}
path_run = sys.argv[1] # folder where bam (and json files for Ion Torrent server) is/are located
# ### Ion Torrent method
# # load run info
# dpniDict = {}
#
# json_file = '%s/ion_params_00.json' % path_run
# json_load = json.load(open(json_file))
# runname = json_load['exp_json']['log']['runname'] #.split('-DANNI')[0]
# # runname ='-'.join(runname.split('-')[0:2])
#
# dpniDict[runname] = []
#
# #get sample and barcode name
# for sample, barcode in json_load['experimentAnalysisSettings']['barcodedSamples'].items():
#
# sample_name = sample.replace(' ', '_') # just in case...
# barcode_name = barcode['barcodeSampleInfo'].keys()[0]
#
# dpniDict[runname].append([barcode_name, sample_name])
#
# for run, design in dpniDict.items():
# for sample in design:
#
# barcode_id = sample[0]
# sample_name = sample[1]
# name = run + "_" + sample_name
# bam = os.path.join(path_run, barcode_id)+"_rawlib.bam"
#
# Yl_reads = []
# Y_reads = []
#
# #coverage Y linked genes
# for gene,coord in Y.items():
# cmd = "samtools view -c {bam} {coord}".format(bam = bam, coord = coord)
# process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# stdout, stderr = process.communicate()
# if process.returncode != 0:
# raise Exception(stderr)
# else:
# Yl_reads.append(int(stdout[:-1]))
#
# cmd = "samtools view -c {bam} {coord}".format(bam = bam, coord = "chrY")
# process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# stdout, stderr = process.communicate()
# if process.returncode != 0:
# raise Exception(stderr)
# else:
# Y_reads.append(int(stdout[:-1]))
#
# print name, sum(Yl_reads), Y_reads[0], float(sum(Yl_reads))*100/float(Y_reads[0]), Yl_reads
### cluster method
for bam in os.listdir(path_run):
if bam.endswith(".bam") or bam.endswith(".cram"):
name = os.path.basename(bam).split('.')[0]
bam_path = os.path.join(path_run, bam)
Yl_reads = []
Y_reads = []
for gene,coord in Y.items():
cmd = "samtools view -c {bam} {coord}".format(bam = bam_path, coord = coord)
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode != 0:
raise Exception(stderr)
else:
Yl_reads.append(int(stdout[:-1]))
cmd = "samtools view -c {bam} {coord}".format(bam = bam_path, coord = "chrY")
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode != 0:
raise Exception(stderr)
else:
Y_reads.append(int(stdout[:-1]))
if int(Y_reads[0]) != 0:
print name, sum(Yl_reads), Y_reads[0], float(sum(Yl_reads))*100/float(Y_reads[0]), Yl_reads
else:
print name, sum(Yl_reads), Y_reads[0], 0, Yl_reads
if __name__ == '__main__':
main() | gpl-3.0 | 8,446,687,236,552,019,000 | 37.60177 | 109 | 0.494153 | false | 3.542648 | false | false | false |
pydata/conf_site | symposion/schedule/models.py | 1 | 7752 | from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.utils.translation import gettext_lazy as _
from autoslug.fields import AutoSlugField
from symposion.markdown_parser import parse
from symposion.proposals.models import ProposalBase, AdditionalSpeaker
from symposion.conference.models import Section
from symposion.speakers.models import Speaker
class Schedule(models.Model):
section = models.OneToOneField(
Section, on_delete=models.CASCADE, verbose_name=_("Section")
)
published = models.BooleanField(default=True, verbose_name=_("Published"))
hidden = models.BooleanField(
_("Hide schedule from overall conference view"), default=False
)
def __str__(self):
return "%s Schedule" % self.section
def first_date(self):
if self.day_set.count():
return self.day_set.first().date
else:
return None
class Meta:
ordering = ["section"]
verbose_name = _("Schedule")
verbose_name_plural = _("Schedules")
class Day(models.Model):
schedule = models.ForeignKey(
Schedule, on_delete=models.CASCADE, verbose_name=_("Schedule")
)
date = models.DateField(verbose_name=_("Date"))
def __str__(self):
return "%s" % self.date
class Meta:
unique_together = [("schedule", "date")]
ordering = ["date"]
verbose_name = _("date")
verbose_name_plural = _("dates")
class Room(models.Model):
schedule = models.ForeignKey(
Schedule, on_delete=models.CASCADE, verbose_name=_("Schedule")
)
name = models.CharField(max_length=65, verbose_name=_("Name"))
order = models.PositiveIntegerField(verbose_name=_("Order"))
def __str__(self):
return self.name
class Meta:
verbose_name = _("Room")
verbose_name_plural = _("Rooms")
class SlotKind(models.Model):
"""
A slot kind represents what kind a slot is. For example, a slot can be a
break, lunch, or X-minute talk.
"""
schedule = models.ForeignKey(
Schedule, on_delete=models.CASCADE, verbose_name=_("schedule")
)
label = models.CharField(max_length=50, verbose_name=_("Label"))
def __str__(self):
return self.label
class Meta:
verbose_name = _("Slot kind")
verbose_name_plural = _("Slot kinds")
class Slot(models.Model):
day = models.ForeignKey(
Day, on_delete=models.CASCADE, verbose_name=_("Day")
)
kind = models.ForeignKey(
SlotKind, on_delete=models.CASCADE, verbose_name=_("Kind")
)
start = models.DateTimeField(verbose_name=_("Start"))
end = models.DateTimeField(verbose_name=_("End"))
content_override = models.TextField(
blank=True, verbose_name=_("Content override")
)
content_override_html = models.TextField(blank=True, editable=False)
def assign(self, content):
"""
Assign the given content to this slot and if a previous slot content
was given we need to unlink it to avoid integrity errors.
"""
self.unassign()
content.slot = self
content.save()
def unassign(self):
"""
Unassign the associated content with this slot.
"""
content = self.content
if content and content.slot_id:
content.slot = None
content.save()
@property
def content(self):
"""
Return the content this slot represents.
@@@ hard-coded for presentation for now
"""
try:
return self.content_ptr
except ObjectDoesNotExist:
return None
@property
def length_in_minutes(self):
return int(
(self.end - self.start).total_seconds() / 60
)
@property
def rooms(self):
return Room.objects.filter(pk__in=self.slotroom_set.values("room"))
def save(self, *args, **kwargs):
self.content_override_html = parse(self.content_override)
super(Slot, self).save(*args, **kwargs)
def __str__(self):
return "{!s} {!s} ({!s} - {!s}) {!s}".format(
self.day,
self.kind,
self.start,
self.end,
" ".join(map(lambda r: r.__str__(), self.rooms)),
)
class Meta:
ordering = ["day", "start", "end"]
verbose_name = _("slot")
verbose_name_plural = _("slots")
class SlotRoom(models.Model):
"""
Links a slot with a room.
"""
slot = models.ForeignKey(
Slot, on_delete=models.CASCADE, verbose_name=_("Slot")
)
room = models.ForeignKey(
Room, on_delete=models.CASCADE, verbose_name=_("Room")
)
def __str__(self):
return "%s %s" % (self.room, self.slot)
class Meta:
unique_together = [("slot", "room")]
ordering = ["slot", "room__order"]
verbose_name = _("Slot room")
verbose_name_plural = _("Slot rooms")
class Presentation(models.Model):
slot = models.OneToOneField(
Slot,
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="content_ptr",
verbose_name=_("Slot"),
)
title = models.CharField(max_length=100, verbose_name=_("Title"))
slug = AutoSlugField(
default="",
editable=True,
help_text=(
"Slug that appears in presentation URLs. Automatically "
"generated from the presentation's title. This field should "
"not be edited after the schedule has been published."
),
max_length=100,
populate_from="title",
unique=True)
description = models.TextField(verbose_name=_("Description"))
description_html = models.TextField(blank=True, editable=False)
abstract = models.TextField(verbose_name=_("Abstract"))
abstract_html = models.TextField(blank=True, editable=False)
speaker = models.ForeignKey(
Speaker,
on_delete=models.CASCADE,
related_name="presentations",
verbose_name=_("Speaker"),
)
additional_speakers = models.ManyToManyField(
Speaker,
related_name="copresentations",
blank=True,
verbose_name=_("Additional speakers"),
)
cancelled = models.BooleanField(default=False, verbose_name=_("Cancelled"))
proposal_base = models.OneToOneField(
ProposalBase,
on_delete=models.CASCADE,
related_name="presentation",
verbose_name=_("Proposal base"),
)
section = models.ForeignKey(
Section,
on_delete=models.CASCADE,
related_name="presentations",
verbose_name=_("Section"),
)
def save(self, *args, **kwargs):
self.description_html = parse(self.description)
self.abstract_html = parse(self.abstract)
return super(Presentation, self).save(*args, **kwargs)
@property
def number(self):
return self.proposal.number
@property
def proposal(self):
if self.proposal_base_id is None:
return None
return ProposalBase.objects.get_subclass(pk=self.proposal_base_id)
def speakers(self):
yield self.speaker
accepted_status = AdditionalSpeaker.SPEAKING_STATUS_ACCEPTED
speakers = self.additional_speakers.filter(
additionalspeaker__status=accepted_status,
additionalspeaker__proposalbase=self.proposal_base,
)
for speaker in speakers:
yield speaker
def __str__(self):
return "#%s %s (%s)" % (self.number, self.title, self.speaker)
class Meta:
ordering = ["slot"]
verbose_name = _("presentation")
verbose_name_plural = _("presentations")
| mit | 3,761,150,195,929,748,500 | 28.142857 | 79 | 0.603586 | false | 4.116835 | false | false | false |
chrisism/plugin.program.advanced.emulator.launcher | resources/attic/objects.py | 3 | 37135 | #
# Old or obsolete code.
#
# -------------------------------------------------------------------------------------------------
# Kodi favorites launcher
# Do not use, AEL must not access favoruites.xml directly. Otherwise, addon will not be
# accepted in the Kodi official repository.
# -------------------------------------------------------------------------------------------------
class KodiLauncher(LauncherABC):
def launch(self):
self.title = self.entity_data['m_name']
self.application = FileName('xbmc.exe')
self.arguments = self.entity_data['application']
super(KodiLauncher, self).launch()
def supports_launching_roms(self): return False
def get_launcher_type(self): return LAUNCHER_KODI_FAVOURITES
def get_launcher_type_name(self): return "Kodi favourites launcher"
def change_application(self):
current_application = self.entity_data['application']
dialog = KodiDictionaryDialog()
selected_application = dialog.select('Select the favourite', self._get_kodi_favourites(), current_application)
if selected_application is None or selected_application == current_application:
return False
self.entity_data['application'] = selected_application
self.entity_data['original_favname'] = self._get_title_from_selected_favourite(selected_application, 'original_favname', self.entity_data)
return True
def get_edit_options(self):
options = collections.OrderedDict()
options['EDIT_METADATA'] = 'Edit Metadata ...'
options['EDIT_ASSETS'] = 'Edit Assets/Artwork ...'
options['SET_DEFAULT_ASSETS'] = 'Choose default Assets/Artwork ...'
options['CHANGE_CATEGORY'] = 'Change Category'
options['LAUNCHER_STATUS'] = 'Launcher status: {0}'.format(self.get_state())
options['ADVANCED_MODS'] = 'Advanced Modifications ...'
options['EXPORT_LAUNCHER'] = 'Export Launcher XML configuration ...'
options['DELETE_LAUNCHER'] = 'Delete Launcher'
return options
def get_advanced_modification_options(self):
log_debug('KodiLauncher::get_advanced_modification_options() Returning edit options')
toggle_window_str = 'ON' if self.entity_data['toggle_window'] else 'OFF'
non_blocking_str = 'ON' if self.entity_data['non_blocking'] else 'OFF'
org_favname = self.entity_data['original_favname'] if 'original_favname' in self.entity_data else 'unknown'
options = super(KodiLauncher, self).get_advanced_modification_options()
options['CHANGE_APPLICATION'] = "Change favourite: '{0}'".format(org_favname)
options['TOGGLE_WINDOWED'] = "Toggle Kodi into windowed mode (now {0})".format(toggle_window_str)
options['TOGGLE_NONBLOCKING'] = "Non-blocking launcher (now {0})".format(non_blocking_str)
return options
def _get_builder_wizard(self, wizard):
wizard = DictionarySelectionWizardDialog('application', 'Select the favourite', self._get_kodi_favourites(), wizard)
wizard = DummyWizardDialog('s_icon', '', wizard, self._get_icon_from_selected_favourite)
wizard = DummyWizardDialog('original_favname', '', wizard, self._get_title_from_selected_favourite)
wizard = DummyWizardDialog('m_name', '', wizard, self._get_title_from_selected_favourite)
wizard = KeyboardWizardDialog('m_name','Set the title of the launcher', wizard)
wizard = SelectionWizardDialog('platform', 'Select the platform', AEL_platform_list, wizard)
return wizard
def _get_kodi_favourites(self):
favourites = kodi_read_favourites()
fav_options = {}
for key in favourites:
fav_options[key] = favourites[key][0]
return fav_options
def _get_icon_from_selected_favourite(self, input, item_key, launcher):
fav_action = launcher['application']
favourites = kodi_read_favourites()
for key in favourites:
if fav_action == key:
return favourites[key][1]
return 'DefaultProgram.png'
def _get_title_from_selected_favourite(self, input, item_key, launcher):
fav_action = launcher['application']
favourites = kodi_read_favourites()
for key in favourites:
if fav_action == key:
return favourites[key][0]
return _get_title_from_app_path(input, launcher)
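# A minimal sketch of the favourites dictionary shape that the helper methods above assume.
# Illustration only and never called by AEL: the favourite action string and labels below are
# made up. kodi_read_favourites() is expected to map a favourite action to a tuple whose first
# two items are (name, icon).
def _example_kodi_favourites_lookup():
    # Hypothetical output of kodi_read_favourites().
    favourites = {
        'ActivateWindow(Videos,videodb://movies/titles/,return)': ('My Movies', 'DefaultMovies.png'),
    }
    # Same lookup pattern used by _get_kodi_favourites() and _get_title_from_selected_favourite().
    fav_options = {}
    for key in favourites:
        fav_options[key] = favourites[key][0]
    return fav_options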
# -------------------------------------------------------------------------------------------------
# XmlDataContext should be a singleton and only used by the repository classes.
# This class holds the actual XML data and reads/writes that data.
# OBSOLETE CODE THAT WILL BE REMOVED.
# -------------------------------------------------------------------------------------------------
class XmlDataContext(object):
def __init__(self, xml_file_path):
log_debug('XmlDataContext::init() "{0}"'.format(xml_file_path.getPath()))
self._xml_root = None
self.repo_fname = xml_file_path
def xml_exists(self):
return self.repo_fname.exists()
#
    # If the XML file does not exist (for example, when the addon is executed for the first
    # time) then allow the initial data to be set up.
#
def set_xml_root(self, xml_repo_str):
self._xml_root = fs_get_XML_root_from_str(xml_repo_str)
# Lazy loading of xml data through property
@property
def xml_data(self):
if self._xml_root is None: self._load_xml()
return self._xml_root
#
    # If there is any problem with the filesystem then these functions display an error
    # dialog and raise an Addon_Error exception.
#
def _load_xml(self):
log_debug('XmlDataContext::_load_xml() Loading "{0}"'.format(self.repo_fname.getPath()))
xml_repo_str = self.repo_fname.loadFileToStr()
self._xml_root = fs_get_XML_root_from_str(xml_repo_str)
def commit(self):
log_info('XmlDataContext::commit() Saving "{0}"'.format(self.repo_fname.getPath()))
xml_repo_str = fs_get_str_from_XML_root(self._xml_root)
self.repo_fname.saveStrToFile(xml_repo_str)
def get_nodes(self, tag):
log_debug('XmlDataContext::get_nodes(): xpath query "{}"'.format(tag))
return self.xml_data.findall(tag)
def get_node(self, tag, id):
query = "{}[id='{}']".format(tag, id)
log_debug('XmlDataContext::get_node(): xpath query "{}"'.format(query))
return self.xml_data.find(query)
def get_nodes_by(self, tag, field, value):
query = "{}[{}='{}']".format(tag, field, value)
log_debug('XmlDataContext::get_nodes_by(): xpath query "{}"'.format(query))
return self.xml_data.findall(query)
# creates/updates xml node identified by tag and id with the given dictionary of data
def save_node(self, tag, id, updated_data):
node_to_update = self.get_node(tag, id)
if node_to_update is None:
node_to_update = self.xml_data.makeelement(tag, {})
self.xml_data.append(node_to_update)
node_to_update.clear()
        for key in updated_data:
            updated_value = updated_data[key]
            # >> To simulate a list with XML allow multiple XML tags with the same name.
            # >> Note that the value (not the data dictionary) must be tested for being a list
            # >> and that a fresh element must be created for every list item.
            if isinstance(updated_value, list):
                for extra_value in updated_value:
                    element = self.xml_data.makeelement(key, {})
                    element.text = unicode(extra_value)
                    node_to_update.append(element)
            else:
                element = self.xml_data.makeelement(key, {})
                element.text = unicode(updated_value)
                node_to_update.append(element)
def remove_node(self, tag, id):
node_to_remove = self.get_node(tag, id)
if node_to_remove is None:
return
self.xml_data.remove(node_to_remove)
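# A usage sketch of XmlDataContext, kept for illustration only and never called by AEL. The
# repository file name below is an assumption; plugin_data_dir is expected to be a FileName
# object from the AEL filesystem utilities.
def _example_xml_data_context_usage(plugin_data_dir):
    # Open (and lazily load) the XML repository.
    context = XmlDataContext(plugin_data_dir.pjoin('categories.xml'))
    # Create or update a node identified by its tag and id, then persist the whole file.
    context.save_node('category', 'c_0001', {'id': 'c_0001', 'm_name': 'Arcade', 'finished': False})
    context.commit()
    # Read the node back as an ElementTree element.
    return context.get_node('category', 'c_0001')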
# -------------------------------------------------------------------------------------------------
# --- Repository class for Category objects ---
# Arranges retrieving and storing of the categories from and into the XML data file.
# Creates Category objects with a reference to an instance of AELObjectFactory.
# OBSOLETE CODE THAT WILL BE REMOVED.
# -------------------------------------------------------------------------------------------------
class CategoryRepository(object):
def __init__(self, data_context, obj_factory):
self.data_context = data_context
self.obj_factory = obj_factory
        # When AEL is executed for the first time categories.xml does not exist. In this case,
        # create an empty in-memory repository to avoid concurrent writing problems (AEL may be
        # called concurrently by skins). When the user creates a Category/Launcher then write
        # categories.xml to the filesystem.
if not self.data_context.xml_exists():
log_debug('CategoryRepository::init() Creating empty categories repository.')
xml_repo_str = (
'<?xml version="1.0" encoding="utf-8" standalone="yes"?>'
'<advanced_emulator_launcher version="1">'
'<control>'
'<update_timestamp>0.0</update_timestamp>'
'</control>'
'</advanced_emulator_launcher>'
)
data_context.set_xml_root(xml_repo_str)
# -------------------------------------------------------------------------------------------------
# Data model used in the plugin
    # Internally all strings in the data model are Unicode. They will be encoded to
    # UTF-8 when writing files.
    # -------------------------------------------------------------------------------------------------
    # These functions create a new data structure for the given object and (very importantly)
    # fill in the correct default values. These must match what is written/read from/to the XML files.
# Tag name in the XML is the same as in the data dictionary.
#
def _parse_xml_to_dictionary(self, category_element):
__debug_xml_parser = False
category = {}
# Parse child tags of category
for category_child in category_element:
# By default read strings
xml_text = category_child.text if category_child.text is not None else ''
xml_text = text_unescape_XML(xml_text)
xml_tag = category_child.tag
if __debug_xml_parser: log_debug('{0} --> {1}'.format(xml_tag, xml_text.encode('utf-8')))
# Now transform data depending on tag name
if xml_tag == 'finished':
category[xml_tag] = True if xml_text == 'True' else False
else:
# Internal data is always stored as Unicode. ElementTree already outputs Unicode.
category[xml_tag] = xml_text
return category
# Finds a Category by ID in the database. ID may be a Virtual/Special category.
# Returns a Category object instance or None if the category ID is not found in the DB.
def find(self, category_id):
if category_id == VCATEGORY_ADDONROOT_ID:
category_dic = fs_new_category()
category_dic['type'] = OBJ_CATEGORY_VIRTUAL
category_dic['id'] = VCATEGORY_ADDONROOT_ID
category_dic['m_name'] = 'Root category'
else:
category_element = self.data_context.get_node('category', category_id)
if category_element is None:
log_debug('Cannot find category with id {0}'.format(category_id))
return None
category_dic = self._parse_xml_to_dictionary(category_element)
category = self.obj_factory.create_from_dic(category_dic)
return category
    # Returns a list with all the Category objects. Each list element is a Category instance.
def find_all(self):
categories = []
category_elements = self.data_context.get_nodes('category')
log_debug('Found {0} categories'.format(len(category_elements)))
for category_element in category_elements:
category_dic = self._parse_xml_to_dictionary(category_element)
log_debug('Creating category instance for category {0}'.format(category_dic['id']))
category = self.obj_factory.create_from_dic(category_dic)
categories.append(category)
return categories
def get_simple_list(self):
category_list = {}
category_elements = self.data_context.get_nodes('category')
for category_element in category_elements:
id = category_element.find('id').text
name = category_element.find('m_name').text
category_list[id] = name
return category_list
def count(self):
return len(self.data_context.get_nodes('category'))
def save(self, category):
category_id = category.get_id()
self.data_context.save_node('category', category_id, category.get_data_dic())
self.data_context.commit()
def save_multiple(self, categories):
for category in categories:
category_id = category.get_id()
self.data_context.save_node('category', category_id, category.get_data_dic())
self.data_context.commit()
def delete(self, category):
category_id = category.get_id()
self.data_context.remove_node('category', category_id)
self.data_context.commit()
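# A minimal usage sketch of CategoryRepository, illustration only and never called by AEL.
# The category id below is made up; obj_factory is assumed to be an AELObjectFactory instance
# created elsewhere in AEL.
def _example_category_repository_usage(xml_file_path, obj_factory):
    data_context = XmlDataContext(xml_file_path)
    repository = CategoryRepository(data_context, obj_factory)
    # Look up a single Category by id (None if not found) and list all of them.
    category = repository.find('c_0001')
    categories = repository.find_all()
    # Persisting a Category writes its node and commits categories.xml to disk.
    if category is not None:
        repository.save(category)
    return categories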
# -------------------------------------------------------------------------------------------------
# Repository class for Launchers objects.
# Arranges retrieving and storing of the launchers from and into the xml data file.
# OBSOLETE CODE THAT WILL BE REMOVED.
# -------------------------------------------------------------------------------------------------
class LauncherRepository(object):
def __init__(self, data_context, obj_factory):
        # Categories and Launchers share an XML repository file. If categories.xml does not
        # exist, the CategoryRepository() class initialises the XmlDataContext() with
        # empty data.
self.data_context = data_context
self.obj_factory = obj_factory
def _parse_xml_to_dictionary(self, launcher_element):
__debug_xml_parser = False
# Sensible default values
launcher = fs_new_launcher()
if __debug_xml_parser:
log_debug('Element has {0} child elements'.format(len(launcher_element)))
# Parse child tags of launcher element
for element_child in launcher_element:
# >> By default read strings
xml_text = element_child.text if element_child.text is not None else ''
xml_text = text_unescape_XML(xml_text)
xml_tag = element_child.tag
if __debug_xml_parser:
log_debug('{0} --> {1}'.format(xml_tag, xml_text.encode('utf-8')))
# >> Transform list() datatype
if xml_tag == 'args_extra':
launcher[xml_tag].append(xml_text)
# >> Transform Bool datatype
elif xml_tag == 'finished' or xml_tag == 'toggle_window' or xml_tag == 'non_blocking' or \
xml_tag == 'multidisc':
launcher[xml_tag] = True if xml_text == 'True' else False
# >> Transform Int datatype
elif xml_tag == 'num_roms' or xml_tag == 'num_parents' or xml_tag == 'num_clones' or \
xml_tag == 'num_have' or xml_tag == 'num_miss' or xml_tag == 'num_unknown':
launcher[xml_tag] = int(xml_text)
# >> Transform Float datatype
elif xml_tag == 'timestamp_launcher' or xml_tag == 'timestamp_report':
launcher[xml_tag] = float(xml_text)
else:
launcher[xml_tag] = xml_text
return launcher
def find(self, launcher_id):
        if launcher_id in [VLAUNCHER_FAVOURITES_ID, VLAUNCHER_RECENT_ID, VLAUNCHER_MOST_PLAYED_ID]:
            # Virtual launchers are not stored in the XML file, they are created on the fly by
            # the object factory (note that only self.obj_factory exists on this class, see __init__).
            launcher = self.obj_factory.create_new(launcher_id)
            return launcher
launcher_element = self.data_context.get_node('launcher', launcher_id)
if launcher_element is None:
log_debug('Launcher ID {} not found'.format(launcher_id))
return None
launcher_dic = self._parse_xml_to_dictionary(launcher_element)
launcher = self.obj_factory.create_from_dic(launcher_dic)
return launcher
def find_all_ids(self):
launcher_ids = []
launcher_id_elements = self.data_context.get_nodes('launcher/id')
for launcher_id_element in launcher_id_elements:
            # >> Note that in ElementTree .text is an attribute, not a callable.
            launcher_ids.append(launcher_id_element.text)
return launcher_ids
def find_all(self):
launchers = []
launcher_elements = self.data_context.get_nodes('launcher')
for launcher_element in launcher_elements:
launcher_dic = self._parse_xml_to_dictionary(launcher_element)
launcher = self.obj_factory.create_from_dic(launcher_dic)
launchers.append(launcher)
return launchers
def find_by_launcher_type(self, launcher_type):
launchers = []
launcher_elements = self.data_context.get_nodes_by('launcher', 'type', launcher_type )
for launcher_element in launcher_elements:
launcher_dic = self._parse_xml_to_dictionary(launcher_element)
launcher = self.obj_factory.create_from_dic(launcher_dic)
launchers.append(launcher)
return launchers
def find_by_category(self, category_id):
launchers = []
launcher_elements = self.data_context.get_nodes_by('launcher', 'categoryID', category_id )
if launcher_elements is None or len(launcher_elements) == 0:
log_debug('No launchers found in category {0}'.format(category_id))
return launchers
log_debug('{0} launchers found in category {1}'.format(len(launcher_elements), category_id))
for launcher_element in launcher_elements:
launcher_dic = self._parse_xml_to_dictionary(launcher_element)
launcher = self.obj_factory.create_from_dic(launcher_dic)
launchers.append(launcher)
return launchers
def count(self):
return len(self.data_context.get_nodes('launcher'))
def save(self, launcher, update_launcher_timestamp = True):
if update_launcher_timestamp:
launcher.update_timestamp()
launcher_id = launcher.get_id()
launcher_data_dic = launcher.get_data_dic()
self.data_context.save_node('launcher', launcher_id, launcher_data_dic)
self.data_context.commit()
def save_multiple(self, launchers, update_launcher_timestamp = True):
for launcher in launchers:
if update_launcher_timestamp:
launcher.update_timestamp()
launcher_id = launcher.get_id()
launcher_data_dic = launcher.get_data_dic()
self.data_context.save_node('launcher', launcher_id, launcher_data_dic)
self.data_context.commit()
def delete(self, launcher):
launcher_id = launcher.get_id()
self.data_context.remove_node('launcher', launcher_id)
self.data_context.commit()
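# A usage sketch of LauncherRepository, illustration only and never called by AEL. The category
# id below is made up; obj_factory is assumed to be the same AELObjectFactory instance used by
# CategoryRepository.
def _example_launcher_repository_usage(xml_file_path, obj_factory):
    data_context = XmlDataContext(xml_file_path)
    repository = LauncherRepository(data_context, obj_factory)
    # Launchers can be fetched by id, by launcher type or by the Category that contains them.
    launchers_in_category = repository.find_by_category('c_0001')
    # save_multiple() refreshes each launcher timestamp by default before committing the XML file.
    repository.save_multiple(launchers_in_category)
    return launchers_in_category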
# #################################################################################################
# #################################################################################################
# ROMsets
# #################################################################################################
# #################################################################################################
class RomSetFactory():
def __init__(self, pluginDataDir):
self.ROMS_DIR = pluginDataDir.pjoin('db_ROMs')
self.FAV_JSON_FILE_PATH = pluginDataDir.pjoin('favourites.json')
self.RECENT_PLAYED_FILE_PATH = pluginDataDir.pjoin('history.json')
self.MOST_PLAYED_FILE_PATH = pluginDataDir.pjoin('most_played.json')
self.COLLECTIONS_FILE_PATH = pluginDataDir.pjoin('collections.xml')
self.COLLECTIONS_DIR = pluginDataDir.pjoin('db_Collections')
self.VIRTUAL_CAT_TITLE_DIR = pluginDataDir.pjoin('db_title')
self.VIRTUAL_CAT_YEARS_DIR = pluginDataDir.pjoin('db_years')
self.VIRTUAL_CAT_GENRE_DIR = pluginDataDir.pjoin('db_genre')
self.VIRTUAL_CAT_DEVELOPER_DIR = pluginDataDir.pjoin('db_developer')
self.VIRTUAL_CAT_CATEGORY_DIR = pluginDataDir.pjoin('db_category')
self.VIRTUAL_CAT_NPLAYERS_DIR = pluginDataDir.pjoin('db_nplayers')
self.VIRTUAL_CAT_ESRB_DIR = pluginDataDir.pjoin('db_esrb')
self.VIRTUAL_CAT_RATING_DIR = pluginDataDir.pjoin('db_rating')
if not self.ROMS_DIR.exists(): self.ROMS_DIR.makedirs()
if not self.VIRTUAL_CAT_TITLE_DIR.exists(): self.VIRTUAL_CAT_TITLE_DIR.makedirs()
if not self.VIRTUAL_CAT_YEARS_DIR.exists(): self.VIRTUAL_CAT_YEARS_DIR.makedirs()
if not self.VIRTUAL_CAT_GENRE_DIR.exists(): self.VIRTUAL_CAT_GENRE_DIR.makedirs()
if not self.VIRTUAL_CAT_DEVELOPER_DIR.exists(): self.VIRTUAL_CAT_DEVELOPER_DIR.makedirs()
if not self.VIRTUAL_CAT_CATEGORY_DIR.exists(): self.VIRTUAL_CAT_CATEGORY_DIR.makedirs()
if not self.VIRTUAL_CAT_NPLAYERS_DIR.exists(): self.VIRTUAL_CAT_NPLAYERS_DIR.makedirs()
if not self.VIRTUAL_CAT_ESRB_DIR.exists(): self.VIRTUAL_CAT_ESRB_DIR.makedirs()
if not self.VIRTUAL_CAT_RATING_DIR.exists(): self.VIRTUAL_CAT_RATING_DIR.makedirs()
if not self.COLLECTIONS_DIR.exists(): self.COLLECTIONS_DIR.makedirs()
def create(self, categoryID, launcher_data):
launcherID = launcher_data['id']
log_debug('romsetfactory.create(): categoryID={0}'.format(categoryID))
log_debug('romsetfactory.create(): launcherID={0}'.format(launcherID))
description = self.createDescription(categoryID)
# --- ROM in Favourites ---
if categoryID == VCATEGORY_FAVOURITES_ID and launcherID == VLAUNCHER_FAVOURITES_ID:
return FavouritesRomSet(self.FAV_JSON_FILE_PATH, launcher_data, description)
# --- ROM in Most played ROMs ---
elif categoryID == VCATEGORY_MOST_PLAYED_ID and launcherID == VLAUNCHER_MOST_PLAYED_ID:
return FavouritesRomSet(self.MOST_PLAYED_FILE_PATH, launcher_data, description)
# --- ROM in Recently played ROMs list ---
elif categoryID == VCATEGORY_RECENT_ID and launcherID == VLAUNCHER_RECENT_ID:
return RecentlyPlayedRomSet(self.RECENT_PLAYED_FILE_PATH, launcher_data, description)
# --- ROM in Collection ---
elif categoryID == VCATEGORY_COLLECTIONS_ID:
return CollectionRomSet(self.COLLECTIONS_FILE_PATH, launcher_data, self.COLLECTIONS_DIR, launcherID, description)
# --- ROM in Virtual Launcher ---
elif categoryID == VCATEGORY_TITLE_ID:
log_info('RomSetFactory() loading ROM set Title Virtual Launcher ...')
return VirtualLauncherRomSet(self.VIRTUAL_CAT_TITLE_DIR, launcher_data, launcherID, description)
elif categoryID == VCATEGORY_YEARS_ID:
log_info('RomSetFactory() loading ROM set Years Virtual Launcher ...')
return VirtualLauncherRomSet(self.VIRTUAL_CAT_YEARS_DIR, launcher_data, launcherID, description)
elif categoryID == VCATEGORY_GENRE_ID:
log_info('RomSetFactory() loading ROM set Genre Virtual Launcher ...')
return VirtualLauncherRomSet(self.VIRTUAL_CAT_GENRE_DIR, launcher_data, launcherID, description)
elif categoryID == VCATEGORY_DEVELOPER_ID:
log_info('RomSetFactory() loading ROM set Studio Virtual Launcher ...')
return VirtualLauncherRomSet(self.VIRTUAL_CAT_DEVELOPER_DIR, launcher_data, launcherID, description)
elif categoryID == VCATEGORY_NPLAYERS_ID:
log_info('RomSetFactory() loading ROM set NPlayers Virtual Launcher ...')
return VirtualLauncherRomSet(self.VIRTUAL_CAT_NPLAYERS_DIR, launcher_data, launcherID, description)
elif categoryID == VCATEGORY_ESRB_ID:
log_info('RomSetFactory() loading ROM set ESRB Virtual Launcher ...')
return VirtualLauncherRomSet(self.VIRTUAL_CAT_ESRB_DIR, launcher_data, launcherID, description)
elif categoryID == VCATEGORY_RATING_ID:
log_info('RomSetFactory() loading ROM set Rating Virtual Launcher ...')
return VirtualLauncherRomSet(self.VIRTUAL_CAT_RATING_DIR, launcher_data, launcherID, description)
elif categoryID == VCATEGORY_CATEGORY_ID:
return VirtualLauncherRomSet(self.VIRTUAL_CAT_CATEGORY_DIR, launcher_data, launcherID, description)
elif categoryID == VCATEGORY_PCLONES_ID \
and 'launcher_display_mode' in launcher_data \
and launcher_data['launcher_display_mode'] != LAUNCHER_DMODE_FLAT:
return PcloneRomSet(self.ROMS_DIR, launcher_data, description)
log_info('RomSetFactory() loading standard romset...')
return StandardRomSet(self.ROMS_DIR, launcher_data, description)
def createDescription(self, categoryID):
if categoryID == VCATEGORY_FAVOURITES_ID:
return RomSetDescription('Favourite', 'Browse favourites')
elif categoryID == VCATEGORY_MOST_PLAYED_ID:
return RomSetDescription('Most Played ROM', 'Browse most played')
elif categoryID == VCATEGORY_RECENT_ID:
return RomSetDescription('Recently played ROM', 'Browse by recently played')
elif categoryID == VCATEGORY_TITLE_ID:
return RomSetDescription('Virtual Launcher Title', 'Browse by Title')
elif categoryID == VCATEGORY_YEARS_ID:
return RomSetDescription('Virtual Launcher Years', 'Browse by Year')
elif categoryID == VCATEGORY_GENRE_ID:
return RomSetDescription('Virtual Launcher Genre', 'Browse by Genre')
elif categoryID == VCATEGORY_DEVELOPER_ID:
return RomSetDescription('Virtual Launcher Studio','Browse by Studio')
elif categoryID == VCATEGORY_NPLAYERS_ID:
return RomSetDescription('Virtual Launcher NPlayers', 'Browse by Number of Players')
elif categoryID == VCATEGORY_ESRB_ID:
return RomSetDescription('Virtual Launcher ESRB', 'Browse by ESRB Rating')
elif categoryID == VCATEGORY_RATING_ID:
return RomSetDescription('Virtual Launcher Rating', 'Browse by User Rating')
elif categoryID == VCATEGORY_CATEGORY_ID:
return RomSetDescription('Virtual Launcher Category', 'Browse by Category')
#if virtual_categoryID == VCATEGORY_TITLE_ID:
# vcategory_db_filename = VCAT_TITLE_FILE_PATH
# vcategory_name = 'Browse by Title'
#elif virtual_categoryID == VCATEGORY_YEARS_ID:
# vcategory_db_filename = VCAT_YEARS_FILE_PATH
# vcategory_name = 'Browse by Year'
#elif virtual_categoryID == VCATEGORY_GENRE_ID:
# vcategory_db_filename = VCAT_GENRE_FILE_PATH
# vcategory_name = 'Browse by Genre'
#elif virtual_categoryID == VCATEGORY_STUDIO_ID:
# vcategory_db_filename = VCAT_STUDIO_FILE_PATH
# vcategory_name = 'Browse by Studio'
#elif virtual_categoryID == VCATEGORY_NPLAYERS_ID:
# vcategory_db_filename = VCAT_NPLAYERS_FILE_PATH
# vcategory_name = 'Browse by Number of Players'
#elif virtual_categoryID == VCATEGORY_ESRB_ID:
# vcategory_db_filename = VCAT_ESRB_FILE_PATH
# vcategory_name = 'Browse by ESRB Rating'
#elif virtual_categoryID == VCATEGORY_RATING_ID:
# vcategory_db_filename = VCAT_RATING_FILE_PATH
# vcategory_name = 'Browse by User Rating'
#elif virtual_categoryID == VCATEGORY_CATEGORY_ID:
# vcategory_db_filename = VCAT_CATEGORY_FILE_PATH
# vcategory_name = 'Browse by Category'
return None
class RomSetDescription():
def __init__(self, title, description, isRegularLauncher = False):
self.title = title
self.description = description
self.isRegularLauncher = isRegularLauncher
class RomSet():
__metaclass__ = abc.ABCMeta
def __init__(self, romsDir, launcher, description):
self.romsDir = romsDir
self.launcher = launcher
self.description = description
@abc.abstractmethod
def romSetFileExists(self):
return False
@abc.abstractmethod
def loadRoms(self):
return {}
@abc.abstractmethod
def loadRomsAsList(self):
return []
@abc.abstractmethod
def loadRom(self, romId):
return None
@abc.abstractmethod
def saveRoms(self, roms):
pass
@abc.abstractmethod
def clear(self):
pass
class StandardRomSet(RomSet):
def __init__(self, romsDir, launcher, description):
self.roms_base_noext = launcher['roms_base_noext'] if launcher is not None and 'roms_base_noext' in launcher else None
self.view_mode = launcher['launcher_display_mode'] if launcher is not None and 'launcher_display_mode' in launcher else None
if self.roms_base_noext is None:
self.repositoryFile = romsDir
elif self.view_mode == LAUNCHER_DMODE_FLAT:
self.repositoryFile = romsDir.pjoin(self.roms_base_noext + '.json')
else:
self.repositoryFile = romsDir.pjoin(self.roms_base_noext + '_parents.json')
super(StandardRomSet, self).__init__(romsDir, launcher, description)
def romSetFileExists(self):
return self.repositoryFile.exists()
def loadRoms(self):
if not self.romSetFileExists():
log_warning('Launcher "{0}" JSON not found.'.format(self.roms_base_noext))
return None
log_info('StandardRomSet() Loading ROMs in Launcher ...')
# was disk_IO.fs_load_ROMs_JSON()
roms = {}
# --- Parse using json module ---
        # >> On Github issue #8 a user had an empty JSON file for ROMs. This raised a
        # ValueError exception and the launcher could not be deleted. Deal with this
        # exception so that at least the launcher can be rescanned.
log_verb('StandardRomSet.loadRoms() FILE {0}'.format(self.repositoryFile.getPath()))
try:
roms = self.repositoryFile.readJson()
except ValueError:
            statinfo = self.repositoryFile.stat()
log_error('StandardRomSet.loadRoms() ValueError exception in json.load() function')
log_error('StandardRomSet.loadRoms() Dir {0}'.format(self.repositoryFile.getPath()))
log_error('StandardRomSet.loadRoms() Size {0}'.format(statinfo.st_size))
return roms
def loadRomsAsList(self):
roms_dict = self.loadRoms()
if roms_dict is None:
return None
roms = []
for key in roms_dict:
roms.append(roms_dict[key])
return roms
def loadRom(self, romId):
roms = self.loadRoms()
if roms is None:
log_error("StandardRomSet(): Could not load roms")
return None
romData = roms[romId]
if romData is None:
log_warning("StandardRomSet(): Rom with ID '{0}' not found".format(romId))
return None
return romData
def saveRoms(self, roms):
fs_write_ROMs_JSON(self.romsDir, self.launcher, roms)
pass
def clear(self):
fs_unlink_ROMs_database(self.romsDir, self.launcher)
class PcloneRomSet(StandardRomSet):
def __init__(self, romsDir, launcher, description):
super(PcloneRomSet, self).__init__(romsDir, launcher, description)
self.roms_base_noext = launcher['roms_base_noext'] if launcher is not None and 'roms_base_noext' in launcher else None
self.repositoryFile = self.romsDir.pjoin(self.roms_base_noext + '_index_PClone.json')
class FavouritesRomSet(StandardRomSet):
def loadRoms(self):
log_info('FavouritesRomSet() Loading ROMs in Favourites ...')
roms = fs_load_Favourites_JSON(self.repositoryFile)
return roms
def saveRoms(self, roms):
log_info('FavouritesRomSet() Saving Favourites ROMs ...')
fs_write_Favourites_JSON(self.repositoryFile, roms)
class VirtualLauncherRomSet(StandardRomSet):
def __init__(self, romsDir, launcher, launcherID, description):
self.launcherID = launcherID
super(VirtualLauncherRomSet, self).__init__(romsDir, launcher, description)
def romSetFileExists(self):
hashed_db_filename = self.romsDir.pjoin(self.launcherID + '.json')
return hashed_db_filename.exists()
def loadRoms(self):
if not self.romSetFileExists():
log_warning('VirtualCategory "{0}" JSON not found.'.format(self.launcherID))
return None
log_info('VirtualCategoryRomSet() Loading ROMs in Virtual Launcher ...')
roms = fs_load_VCategory_ROMs_JSON(self.romsDir, self.launcherID)
return roms
def saveRoms(self, roms):
fs_write_Favourites_JSON(self.romsDir, roms)
pass
class RecentlyPlayedRomSet(RomSet):
def romSetFileExists(self):
return self.romsDir.exists()
def loadRoms(self):
log_info('RecentlyPlayedRomSet() Loading ROMs in Recently Played ROMs ...')
romsList = self.loadRomsAsList()
roms = collections.OrderedDict()
for rom in romsList:
roms[rom['id']] = rom
return roms
def loadRomsAsList(self):
roms = fs_load_Collection_ROMs_JSON(self.romsDir)
return roms
def loadRom(self, romId):
roms = self.loadRomsAsList()
if roms is None:
log_error("RecentlyPlayedRomSet(): Could not load roms")
return None
current_ROM_position = fs_collection_ROM_index_by_romID(romId, roms)
if current_ROM_position < 0:
kodi_dialog_OK('Collection ROM not found in list. This is a bug!')
return None
romData = roms[current_ROM_position]
if romData is None:
log_warning("RecentlyPlayedRomSet(): Rom with ID '{0}' not found".format(romId))
return None
return romData
def saveRoms(self, roms):
fs_write_Collection_ROMs_JSON(self.romsDir, roms)
pass
def clear(self):
pass
class CollectionRomSet(RomSet):
def __init__(self, romsDir, launcher, collection_dir, launcherID, description):
self.collection_dir = collection_dir
self.launcherID = launcherID
super(CollectionRomSet, self).__init__(romsDir, launcher, description)
def romSetFileExists(self):
(collections, update_timestamp) = fs_load_Collection_index_XML(self.romsDir)
collection = collections[self.launcherID]
        roms_json_file = self.collection_dir.pjoin(collection['roms_base_noext'] + '.json')
return roms_json_file.exists()
def loadRomsAsList(self):
(collections, update_timestamp) = fs_load_Collection_index_XML(self.romsDir)
collection = collections[self.launcherID]
roms_json_file = self.collection_dir.pjoin(collection['roms_base_noext'] + '.json')
romsList = fs_load_Collection_ROMs_JSON(roms_json_file)
return romsList
    # NOTE ROMs in a collection are stored as a list and ROMs in Favourites are stored as
    # a dictionary. Convert the Collection list into an ordered dictionary here, and then
    # convert the ordered dictionary back into a list before saving the collection.
def loadRoms(self):
log_info('CollectionRomSet() Loading ROMs in Collection ...')
romsList = self.loadRomsAsList()
roms = collections.OrderedDict()
for rom in romsList:
roms[rom['id']] = rom
return roms
def loadRom(self, romId):
roms = self.loadRomsAsList()
if roms is None:
log_error("CollectionRomSet(): Could not load roms")
return None
current_ROM_position = fs_collection_ROM_index_by_romID(romId, roms)
if current_ROM_position < 0:
kodi_dialog_OK('Collection ROM not found in list. This is a bug!')
return
romData = roms[current_ROM_position]
if romData is None:
log_warning("CollectionRomSet(): Rom with ID '{0}' not found".format(romId))
return None
return romData
def saveRoms(self, roms):
# >> Convert back the OrderedDict into a list and save Collection
collection_rom_list = []
for key in roms:
collection_rom_list.append(roms[key])
        (collections, update_timestamp) = fs_load_Collection_index_XML(self.romsDir)
        collection = collections[self.launcherID]
        json_file_path = self.collection_dir.pjoin(collection['roms_base_noext'] + '.json')
fs_write_Collection_ROMs_JSON(json_file_path, collection_rom_list)
def clear(self):
pass
| gpl-2.0 | -1,525,348,331,375,710,000 | 43.313842 | 146 | 0.612145 | false | 3.841817 | false | false | false |
freedomtan/tensorflow | tensorflow/python/kernel_tests/aggregate_ops_test.py | 1 | 5259 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for aggregate_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class AddNTest(test.TestCase):
# AddN special-cases adding the first M inputs to make (N - M) divisible by 8,
# after which it adds the remaining (N - M) tensors 8 at a time in a loop.
# Test N in [1, 10] so we check each special-case from 1 to 9 and one
# iteration of the loop.
_MAX_N = 10
def _supported_types(self):
if test.is_gpu_available():
return [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.complex64,
dtypes.complex128, dtypes.int64
]
return [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.complex64,
dtypes.complex128]
def _buildData(self, shape, dtype):
data = np.random.randn(*shape).astype(dtype.as_numpy_dtype)
# For complex types, add an index-dependent imaginary component so we can
# tell we got the right value.
if dtype.is_complex:
return data + 10j * data
return data
def testAddN(self):
np.random.seed(12345)
with self.session(use_gpu=True) as sess:
for dtype in self._supported_types():
for count in range(1, self._MAX_N + 1):
data = [self._buildData((2, 2), dtype) for _ in range(count)]
actual = self.evaluate(math_ops.add_n(data))
expected = np.sum(np.vstack(
[np.expand_dims(d, 0) for d in data]), axis=0)
tol = 5e-3 if dtype == dtypes.float16 else 5e-7
self.assertAllClose(expected, actual, rtol=tol, atol=tol)
@test_util.run_deprecated_v1
def testUnknownShapes(self):
np.random.seed(12345)
with self.session(use_gpu=True) as sess:
for dtype in self._supported_types():
data = self._buildData((2, 2), dtype)
for count in range(1, self._MAX_N + 1):
data_ph = array_ops.placeholder(dtype=dtype)
actual = sess.run(math_ops.add_n([data_ph] * count), {data_ph: data})
expected = np.sum(np.vstack([np.expand_dims(data, 0)] * count),
axis=0)
tol = 5e-3 if dtype == dtypes.float16 else 5e-7
self.assertAllClose(expected, actual, rtol=tol, atol=tol)
@test_util.run_deprecated_v1
def testVariant(self):
def create_constant_variant(value):
return constant_op.constant(
tensor_pb2.TensorProto(
dtype=dtypes.variant.as_datatype_enum,
tensor_shape=tensor_shape.TensorShape([]).as_proto(),
variant_val=[
tensor_pb2.VariantTensorDataProto(
# Match registration in variant_op_registry.cc
type_name=b"int",
metadata=np.array(value, dtype=np.int32).tobytes())
]))
# TODO(ebrevdo): Re-enable use_gpu=True once non-DMA Variant
# copying between CPU and GPU is supported.
with self.session(use_gpu=False):
num_tests = 127
values = list(range(100))
variant_consts = [create_constant_variant(x) for x in values]
sum_count_indices = np.random.randint(1, 29, size=num_tests)
sum_indices = [
np.random.randint(100, size=count) for count in sum_count_indices]
expected_sums = [np.sum(x) for x in sum_indices]
variant_sums = [math_ops.add_n([variant_consts[i] for i in x])
for x in sum_indices]
# We use as_string() to get the Variant DebugString for the
# variant_sums; we know its value so we can check via string equality
# here.
#
# Right now, non-numpy-compatible objects cannot be returned from a
# session.run call; similarly, objects that can't be converted to
# native numpy types cannot be passed to ops.convert_to_tensor.
variant_sums_string = string_ops.as_string(variant_sums)
self.assertAllEqual(
variant_sums_string,
["Variant<type: int value: {}>".format(s).encode("utf-8")
for s in expected_sums])
if __name__ == "__main__":
test.main()
| apache-2.0 | -1,037,729,551,034,319,600 | 40.085938 | 80 | 0.646891 | false | 3.764495 | true | false | false |
mittagessen/kraken | kraken/contrib/forced_alignment_overlay.py | 1 | 3194 | #!/usr/bin/env python
"""
Draws a transparent overlay of the forced alignment output over the input
image. Needs OpenFST bindings installed.
"""
import re
import os
import click
import unicodedata
from itertools import cycle
from collections import defaultdict
from PIL import Image, ImageDraw
cmap = cycle([(230, 25, 75, 127),
(60, 180, 75, 127),
(255, 225, 25, 127),
(0, 130, 200, 127),
(245, 130, 48, 127),
(145, 30, 180, 127),
(70, 240, 240, 127)])
def slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
"""
value = unicodedata.normalize('NFKD', value)
value = re.sub(r'[^\w\s-]', '', value).strip().lower()
value = re.sub(r'[-\s]+', '-', value)
return value
@click.command()
@click.option('-f', '--format-type', type=click.Choice(['xml', 'alto', 'page']), default='xml',
help='Sets the input document format. In ALTO and PageXML mode all'
'data is extracted from xml files containing both baselines, polygons, and a'
'link to source images.')
@click.option('-i', '--model', default=None, show_default=True, type=click.Path(exists=True),
help='Transcription model to use.')
@click.option('-o', '--output', type=click.Choice(['alto', 'pagexml', 'overlay']),
show_default=True, default='overlay', help='Output mode. Either page or'
' alto for xml output, overlay for image overlays.')
@click.argument('files', nargs=-1)
def cli(format_type, model, output, files):
"""
    Runs forced alignment with a recognition model on ALTO or PageXML input
    files and produces either image overlays or ALTO/PageXML output.
"""
if len(files) == 0:
ctx = click.get_current_context()
click.echo(ctx.get_help())
ctx.exit()
from PIL import Image, ImageDraw
from kraken.lib import models, xml
from kraken import align, serialization
if format_type == 'xml':
fn = xml.parse_xml
elif format_type == 'alto':
        fn = xml.parse_alto
else:
fn = xml.parse_page
click.echo(f'Loading model {model}')
net = models.load_any(model)
for doc in files:
click.echo(f'Processing {doc} ', nl=False)
data = fn(doc)
im = Image.open(data['image']).convert('RGBA')
records = align.forced_align(data, net)
if output == 'overlay':
tmp = Image.new('RGBA', im.size, (0, 0, 0, 0))
draw = ImageDraw.Draw(tmp)
for record in records:
for pol in record.cuts:
c = next(cmap)
draw.polygon([tuple(x) for x in pol], fill=c, outline=c[:3])
base_image = Image.alpha_composite(im, tmp)
base_image.save(f'high_{os.path.basename(doc)}_algn.png')
else:
with open(f'{os.path.basename(doc)}_algn.xml', 'w') as fp:
fp.write(serialization.serialize(records, image_name=data['image'], regions=data['regions'], template=output))
click.secho('\u2713', fg='green')
if __name__ == '__main__':
cli()
| apache-2.0 | -3,424,175,375,749,796,000 | 34.488889 | 126 | 0.586725 | false | 3.679724 | false | false | false |
tectronics/nyctos | src/data.res/scripts/weapons/dagger.py | 1 | 1085 | import parole
from parole.colornames import colors
from parole.display import interpolateRGB
import pygame, random
import sim_items
from util import *
class Dagger(sim_items.Weapon):
def __init__(self):
bonuses = {
'hitMod': +4,
'damageSkewMod': -4,
}
# TODO: depth, materials, etc.
sim_items.Weapon.__init__(self,
'dagger', # base name
parole.map.AsciiTile('(', colors['Silver']), # symbol
1, # weight
800, # wield energy
850, # attack energy
30, # maximum damage. a wielder with avg stats will average half
# this much damage
bonuses,
'stab', # verb
False, # startsVowel
unidDescription="A small bladed weapon perhaps better suited "\
"for a bar fight than dungeon crawling.",
projectileDamage=25
)
#========================================
thingClass = Dagger
| gpl-2.0 | -8,417,306,946,014,794,000 | 29.138889 | 80 | 0.490323 | false | 4.35743 | false | false | false |
Garfielt/Joyaay | libs/utils.py | 1 | 1489 | # -*- coding: utf-8 -*-
import os
import time
import string
import datetime
try:
from hashlib import md5
except ImportError:
from md5 import md5
class dict_to_object(dict):
def __getattr__(self, key):
try:
return self[key]
except:
return ''
def __setattr__(self, key, value):
self[key] = value
def is_int(s):
for i in s:
if i not in "1234567890":
return False
return True
def isset(v):
try:
type (eval(v))
except:
return False
else:
return True
def check_str(cstr):
return filter(lambda st: st not in " '\";()<>[]", cstr)
def cut_str(cstr):
pass
def timestamp():
return time.time()
def now():
return time.localtime()
def micro_time():
return datetime.datetime.now()
def format_time(tformat = "%Y-%m-%d %X", ttime = None):
if not ttime: ttime = now()
return time.strftime(tformat, ttime)
def hash_md5(s):
m = md5(s)
m.digest()
return m.hexdigest()
def rand_name(s):
if not isinstance(s, unicode):
s = s.decode('utf-8')
return hash_md5("%s-%f" % (s, timestamp()))
def is_file_exist(filepath):
return os.path.isfile(filepath)
def file_real_path(filename, subdir = ''):
folder = os.path.join(os.path.dirname(__file__), subdir)
if not os.path.exists(folder):
os.makedirs(folder)
return '%s/%s' % (folder, filename)
if __name__ == "__main__":
print '' | mit | -5,403,358,708,990,215,000 | 18.350649 | 60 | 0.572196 | false | 3.294248 | false | false | false |
orbkit/orbkit | orbkit/read/cclib_parser.py | 1 | 7730 | import numpy
from orbkit.qcinfo import QCinfo
from orbkit.orbitals import AOClass, MOClass
from orbkit.units import aa_to_a0, ev_to_ha
from orbkit.display import display
from orbkit.tools import l_deg, lquant, get_atom_symbol
from importlib import import_module
def read_with_cclib(filename, cclib_parser=None, all_mo=False, spin=None,
**kwargs):
'''Reads all information desired using cclib.
**Parameters:**
filename : str
Specifies the filename for the input file.
cclib_parser : str
If itype is 'cclib', specifies the cclib.parser.
all_mo : bool, optional
If True, all molecular orbitals are returned.
spin : {None, 'alpha', or 'beta'}, optional
If not None, returns exclusively 'alpha' or 'beta' molecular orbitals.
**Returns:**
qc (class QCinfo) with attributes geo_spec, geo_info, ao_spec, mo_spec, etot :
See :ref:`Central Variables` for details.
'''
#Maybe we actually don't need this
#Can someone check if cclib can handle
#file descriptors?
assert isinstance(filename, str)
if not isinstance(cclib_parser,str):
raise IOError('cclib requires the specification of parser, e.g., ' +
'cclib_parser="Gaussian".')
if cclib_parser == 'Molpro':
display('\nThe Molpro basis set is not properly read by the cclib parser.')
display('Please create a molden file with Molpro, i.e., ' +
'\n\tput,molden,output.molden,NEW;\n')
parsedic = {'Gaussian': 'gaussianparser', 'Gamess': 'gamessparser',
'Orca': 'orcaparser'}
module = import_module('cclib.parser.{}'.format(parsedic[cclib_parser]))
if cclib_parser != 'Gaussian':
cclib_parser = cclib_parser.upper()
parser = getattr(module,cclib_parser)(filename)
ccData = parser.parse()
return convert_cclib(ccData, all_mo=all_mo, spin=spin)
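# Minimal usage sketch (the file name is illustrative; cclib must be installed):
#   qc = read_with_cclib('water.log', cclib_parser='Gaussian', all_mo=True)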
def convert_cclib(ccData, all_mo=False, spin=None):
'''Converts a ccData class created by cclib to an instance of
orbkit's QCinfo class.
**Parameters:**
ccData : class
Contains the input data created by cclib.
all_mo : bool, optional
If True, all molecular orbitals are returned.
spin : {None, 'alpha', or 'beta'}, optional
If not None, returns exclusively 'alpha' or 'beta' molecular orbitals.
**Returns:**
qc (class QCinfo) with attributes geo_spec, geo_info, ao_spec, mo_spec, etot :
See :ref:`Central Variables` for details.
'''
# Initialize the variables
qc = QCinfo()
qc.ao_spec = AOClass([])
qc.mo_spec = MOClass([])
# Converting all information concerning atoms and geometry
qc.geo_spec = ccData.atomcoords[0] * aa_to_a0
for ii in range(ccData.natom):
symbol = get_atom_symbol(atom=ccData.atomnos[ii])
qc.geo_info.append([symbol,str(ii+1),str(ccData.atomnos[ii])])
# Convert geo_info and geo_spec to numpy.ndarrays
qc.format_geo()
# Converting all information about atomic basis set
for ii in range(ccData.natom):
for jj in range(len(ccData.gbasis[ii])):
pnum = len(ccData.gbasis[ii][jj][1])
qc.ao_spec.append({'atom': ii,
'type': str(ccData.gbasis[ii][jj][0]).lower(),
'pnum': pnum,
'coeffs': numpy.zeros((pnum, 2))
})
for kk in range(pnum):
qc.ao_spec[-1]['coeffs'][kk][0] = ccData.gbasis[ii][jj][1][kk][0]
qc.ao_spec[-1]['coeffs'][kk][1] = ccData.gbasis[ii][jj][1][kk][1]
if hasattr(ccData,'aonames'):
# Reconstruct exponents list for ao_spec
cartesian_basis = True
for i in ccData.aonames:
if '+' in i or '-' in i:
cartesian_basis = False
if not cartesian_basis:
qc.ao_spec.spherical = True
count = 0
for i,ao in enumerate(qc.ao_spec):
l = l_deg(lquant[ao['type']],cartesian_basis=cartesian_basis)
if cartesian_basis:
ao['lxlylz'] = []
else:
ao['lm'] = []
for ll in range(l):
if cartesian_basis:
ao['lxlylz'].append((ccData.aonames[count].lower().count('x'),
ccData.aonames[count].lower().count('y'),
ccData.aonames[count].lower().count('z')))
else:
m = ccData.aonames[count].lower().split('_')[-1]
m = m.replace('+',' +').replace('-',' -').replace('s','s 0').split(' ')
p = 'yzx'.find(m[0][-1])
if p != -1:
m = p - 1
else:
m = int(m[-1])
ao['lm'].append((lquant[ao['type']],m))
count += 1
# Converting all information about molecular orbitals
ele_num = numpy.sum(ccData.atomnos) - numpy.sum(ccData.coreelectrons) - ccData.charge
ue = (ccData.mult-1)
# Check for natural orbitals and occupation numbers
is_natorb = False
if hasattr(ccData,'nocoeffs'):
if not hasattr(ccData,'nooccnos'):
raise IOError('There are natural orbital coefficients (`nocoeffs`) in the cclib' +
' ccData, but no natural occupation numbers (`nooccnos`)!')
is_natorb = True
restricted = (len(ccData.mosyms) == 1)
if spin is not None:
if spin != 'alpha' and spin != 'beta':
raise IOError('`spin=%s` is not a valid option' % spin)
elif restricted:
raise IOError('The keyword `spin` is only supported for unrestricted calculations.')
else:
qc.mo_spec.spinpola
display('Converting only molecular orbitals of spin %s.' % spin)
sym = {}
if len(ccData.mosyms) == 1:
add = ['']
orb_sym = [None]
else:
add = ['_a','_b']
orb_sym = ['alpha','beta']
nmo = ccData.nmo if hasattr(ccData,'nmo') else len(ccData.mocoeffs[0])
for ii in range(nmo):
for i,j in enumerate(add):
a = '%s%s' % (ccData.mosyms[i][ii],j)
if a not in sym.keys(): sym[a] = 1
else: sym[a] += 1
if is_natorb:
occ_num = ccData.nooccnos[ii]
elif not restricted:
occ_num = 1.0 if ii <= ccData.homos[i] else 0.0
elif ele_num > ue:
occ_num = 2.0
ele_num -= 2.0
elif ele_num > 0.0 and ele_num <= ue:
occ_num = 1.0
ele_num -= 1.0
ue -= 1.0
else:
occ_num = 0.0
qc.mo_spec.append({'coeffs': (ccData.nocoeffs if is_natorb else ccData.mocoeffs[i])[ii],
'energy': 0.0 if is_natorb else ccData.moenergies[i][ii]*ev_to_ha,
'occ_num': occ_num,
'sym': '%d.%s' %(sym[a],a)
})
if orb_sym[i] is not None:
qc.mo_spec[-1]['spin'] = orb_sym[i]
if spin is not None and spin != orb_sym[i]:
del qc.mo_spec[-1]
# Use default order for atomic basis functions if aonames is not present
if not hasattr(ccData,'aonames'):
display('The attribute `aonames` is not present in the parsed data.')
display('Using the default order of basis functions.')
# Check which basis functions have been used
c_cart = sum([l_deg(l=ao['type'], cartesian_basis=True) for ao in qc.ao_spec])
c_sph = sum([l_deg(l=ao['type'], cartesian_basis=False) for ao in qc.ao_spec])
c = qc.mo_spec.get_coeffs().shape[-1]
if c != c_cart and c == c_sph: # Spherical basis
qc.ao_spec.set_lm_dict(p=[0,1])
elif c != c_cart:
display('Warning: The basis set type does not match with pure spherical ' +
'or pure Cartesian basis!')
display('Please specify qc.ao_spec["lxlylz"] and/or qc.ao_spec["lm"] by your self.')
# Are all MOs requested for the calculation?
if not all_mo:
for i in range(len(qc.mo_spec))[::-1]:
if qc.mo_spec[i]['occ_num'] < 0.0000001:
del qc.mo_spec[i]
qc.mo_spec.update()
qc.ao_spec.update()
return qc
| lgpl-3.0 | -1,085,317,817,685,771,900 | 34.296804 | 94 | 0.596248 | false | 3.191577 | false | false | false |
scraperwiki/spreadsheet-download-tool | pyexcelerate/tests/test_Workbook.py | 1 | 2784 | from ..Workbook import Workbook
import time
import numpy
import nose
import os
from datetime import datetime
from nose.tools import eq_
from .utils import get_output_path
def test_get_xml_data():
wb = Workbook()
ws = wb.new_sheet("Test")
ws[1][1].value = 1
eq_(ws[1][1].value, 1)
ws[1][3].value = 3
eq_(ws[1][3].value, 3)
def test_save():
ROWS = 65
COLUMNS = 100
wb = Workbook()
testData = [[1] * COLUMNS] * ROWS
stime = time.clock()
ws = wb.new_sheet("Test 1", data=testData)
wb.save(get_output_path("test.xlsx"))
#print("%s, %s, %s" % (ROWS, COLUMNS, time.clock() - stime))
def test_formulas():
wb = Workbook()
ws = wb.new_sheet("test")
ws[1][1].value = 1
ws[1][2].value = 2
ws[1][3].value = '=SUM(A1,B1)'
ws[1][4].value = datetime.now()
ws[1][5].value = datetime(1900,1,1,1,0,0)
wb.save(get_output_path("formula-test.xlsx"))
def test_merge():
wb = Workbook()
ws = wb.new_sheet("test")
ws[1][1].value = "asdf"
ws.range("A1", "B1").merge()
eq_(ws[1][2].value, ws[1][1].value)
ws[1][2].value = "qwer"
eq_(ws[1][2].value, ws[1][1].value)
wb.save(get_output_path("merge-test.xlsx"))
def test_cell():
wb = Workbook()
ws = wb.new_sheet("test")
ws.cell("C3").value = "test"
eq_(ws[3][3].value, "test")
def test_range():
wb = Workbook()
ws = wb.new_sheet("test")
ws.range("B2", "D3").value = [[1, 2, 3], [4, 5, 6]]
eq_(ws[2][2].value, 1)
eq_(ws[2][3].value, 2)
eq_(ws[2][4].value, 3)
eq_(ws[3][2].value, 4)
eq_(ws[3][3].value, 5)
eq_(ws[3][4].value, 6)
def test_numpy_range():
wb = Workbook()
ws = wb.new_sheet("test")
ws.range("A1", "GN13").value = numpy.zeros((13,196))
wb.save(get_output_path("numpy-range-test.xlsx"))
def test_none():
testData = [[1,2,None]]
wb = Workbook()
ws = wb.new_sheet("Test 1", data=testData)
ws[1][1].style.font.bold = True
wb.save(get_output_path("none-test.xlsx"))
def test_number_precision():
try:
import xlrd
except ImportError:
raise nose.SkipTest('xlrd not installed')
filename = get_output_path('precision.xlsx')
sheetname = 'Sheet1'
nums = [
1,
1.2,
1.23,
1.234,
1.2345,
1.23456,
1.234567,
1.2345678,
1.23456789,
1.234567890,
1.2345678901,
1.23456789012,
1.234567890123,
1.2345678901234,
1.23456789012345,
]
write_workbook = Workbook()
write_worksheet = write_workbook.new_sheet(sheetname)
for index, value in enumerate(nums):
write_worksheet[index + 1][1].value = value
write_workbook.save(filename)
read_workbook = xlrd.open_workbook(filename)
read_worksheet = read_workbook.sheet_by_name(sheetname)
for row_num in range(len(nums)):
expected = nums[row_num]
        got = read_worksheet.cell(row_num, 0).value
        # compare the value read back via xlrd with the value that was written
        eq_(expected, got)
if os.path.exists(filename):
os.remove(filename)
| bsd-2-clause | -2,669,089,527,514,485,000 | 21.819672 | 64 | 0.619612 | false | 2.41039 | true | false | false |
joachimmetz/plaso | tests/parsers/winreg_plugins/officemru.py | 2 | 4501 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Microsoft Office MRUs Windows Registry plugin."""
import unittest
from plaso.lib import definitions
from plaso.parsers.winreg_plugins import officemru
from tests.parsers.winreg_plugins import test_lib
class OfficeMRUPluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the Microsoft Office MRUs Windows Registry plugin."""
def testFilters(self):
"""Tests the FILTERS class attribute."""
plugin = officemru.OfficeMRUPlugin()
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\'
'Access\\File MRU')
self._AssertFiltersOnKeyPath(plugin, key_path)
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\'
'Access\\Place MRU')
self._AssertFiltersOnKeyPath(plugin, key_path)
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\'
'Excel\\File MRU')
self._AssertFiltersOnKeyPath(plugin, key_path)
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\'
'Excel\\Place MRU')
self._AssertFiltersOnKeyPath(plugin, key_path)
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\'
'PowerPoint\\File MRU')
self._AssertFiltersOnKeyPath(plugin, key_path)
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\'
'PowerPoint\\Place MRU')
self._AssertFiltersOnKeyPath(plugin, key_path)
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\'
'Word\\File MRU')
self._AssertFiltersOnKeyPath(plugin, key_path)
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\'
'Word\\Place MRU')
self._AssertFiltersOnKeyPath(plugin, key_path)
self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')
def testProcess(self):
"""Tests the Process function."""
test_file_entry = self._GetTestFileEntry(['NTUSER-WIN7.DAT'])
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\Word\\'
'File MRU')
win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
registry_key = win_registry.GetKeyByPath(key_path)
plugin = officemru.OfficeMRUPlugin()
storage_writer = self._ParseKeyWithPlugin(
registry_key, plugin, file_entry=test_file_entry)
self.assertEqual(storage_writer.number_of_events, 6)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
expected_event_values = {
'date_time': '2012-03-13 18:27:15.0898020',
'data_type': 'windows:registry:office_mru_list',
'entries': (
'Item 1: [F00000000][T01CD0146EA1EADB0][O00000000]*'
'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\'
'SA-23E Mitchell-Hyundyne Starfury.docx '
'Item 2: [F00000000][T01CD00921FC127F0][O00000000]*'
'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\Earthforce '
'SA-26 Thunderbolt Star Fury.docx '
'Item 3: [F00000000][T01CD009208780140][O00000000]*'
'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\StarFury.docx '
'Item 4: [F00000000][T01CCFE0B22DA9EF0][O00000000]*'
'C:\\Users\\nfury\\Documents\\VIBRANIUM.docx '
'Item 5: [F00000000][T01CCFCBA595DFC30][O00000000]*'
'C:\\Users\\nfury\\Documents\\ADAMANTIUM-Background.docx'),
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
'parser': plugin.NAME,
'timestamp_desc': definitions.TIME_DESCRIPTION_WRITTEN}
self.CheckEventValues(storage_writer, events[5], expected_event_values)
# Test OfficeMRUWindowsRegistryEvent.
expected_value_string = (
'[F00000000][T01CD0146EA1EADB0][O00000000]*'
'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\'
'SA-23E Mitchell-Hyundyne Starfury.docx')
expected_event_values = {
'date_time': '2012-03-13 18:27:15.0830000',
'data_type': 'windows:registry:office_mru',
'key_path': key_path,
'timestamp_desc': definitions.TIME_DESCRIPTION_WRITTEN,
'value_string': expected_value_string}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -3,769,384,684,929,349,000 | 35.893443 | 77 | 0.652966 | false | 3.309559 | true | false | false |
ernestyalumni/Propulsion | Physique/Source/TemperatureConversion.py | 1 | 1435 | """
@name TemperatureConversion.py
@file TemperatureConversion.py
@author Ernest Yeung
@date 20150913
@email [email protected]
@brief I implement temperature conversion with symbolic computation in sympy
@ref
@details
@copyright If you find this code useful, feel free to donate directly and easily
at this direct PayPal link:
https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=ernestsaveschristmas%2bpaypal%40gmail%2ecom&lc=US&item_name=ernestyalumni¤cy_code=USD&bn=PP%2dDonationsBF%3abtn_donateCC_LG%2egif%3aNonHosted
which won't go through a 3rd. party such as indiegogo, kickstarter, patreon.
Otherwise, I receive emails and messages on how all my (free) material on
physics, math, and engineering have helped students with their studies, and I
know what it's like to not have money as a student, but love physics (or math,
sciences, etc.), so I am committed to keeping all my material open-source and
free, whether or not sufficiently crowdfunded, under the open-source MIT
license: feel free to copy, edit, paste, make your own versions, share, use as
you wish.
Peace out, never give up! -EY
"""
import sympy
from sympy import Eq
from sympy import Rational as Rat
from sympy import symbols
from sympy.solvers import solve
T_F, T_C, T_K = symbols("T_F T_C T_K", real=True)
FahrenheitCelsiusConversion = Eq(T_F, T_C * (Rat(9) / Rat(5)) + Rat(32))
KelvinCelsiusConversion = Eq(T_K, T_C + 273.15)
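# Minimal usage sketch (values are illustrative): solve a conversion equation for T_C,
# e.g. water boiling at 212 degrees Fahrenheit.
#   solve(FahrenheitCelsiusConversion.subs(T_F, 212), T_C)  # -> [100]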
| gpl-2.0 | -8,275,377,455,991,462,000 | 38.861111 | 210 | 0.772125 | false | 3.014706 | false | true | false |
RRCKI/pilot | VmPeak.py | 4 | 3453 | # This script is run by the PanDA pilot
# Its purpose is to extract the VmPeak, its average and the rss value from the *.pmon.gz file(s), and
# place these values in a file (VmPeak_values.txt) which in turn is read back by the pilot
# VmPeak_values.txt has the format:
# <VmPeak max>,<VmPeak max mean>,<rss mean>
# Note that for a composite trf, there are multiple *.pmon.gz files. The values reported are the max values of all files (thus the 'max average')
# Prerequisite: the ATLAS environment needs to have been setup before the script is run
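# Illustrative only (hypothetical numbers): a VmPeak_values.txt containing "2500,1800,1200"
# means VmPeak max = 2500, VmPeak max mean = 1800 and rss mean max = 1200.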
import os
def processFiles():
""" Process the PerfMon files using PerfMonComps """
vmem_peak_max = 0
vmem_mean_max = 0
rss_mean_max = 0
# get list of all PerfMon files
from glob import glob
file_list = glob("*.pmon.gz")
if file_list != []:
# loop over all files
for file_name in file_list:
# process this file using PerfMonComps
print "[Pilot VmPeak] Processing file: %s" % (file_name)
info = PerfMonComps.PMonSD.parse(file_name)
if info:
vmem_peak = info[0]['special']['values']['vmem_peak']
vmem_mean = info[0]['special']['values']['vmem_mean']
rss_mean = info[0]['special']['values']['rss_mean']
print "[Pilot VmPeak] vmem_peak = %.1f, vmem_mean = %.1f, rss_mean = %.1f" % (vmem_peak, vmem_mean, rss_mean)
if vmem_peak > vmem_peak_max:
vmem_peak_max = vmem_peak
if vmem_mean > vmem_mean_max:
vmem_mean_max = vmem_mean
if rss_mean > rss_mean_max:
rss_mean_max = rss_mean
else:
print "!!WARNING!!1212!! PerfMonComps.PMonSD.parse returned None while parsing file %s" % (file_name)
# convert to integers
vmem_peak_max = int(vmem_peak_max)
vmem_mean_max = int(vmem_mean_max)
rss_mean_max = int(rss_mean_max)
else:
print "[Pilot VmPeak] Did not find any PerfMon log files"
return vmem_peak_max, vmem_mean_max, rss_mean_max
def dumpValues(vmem_peak_max, vmem_mean_max, rss_mean_max):
""" Create the VmPeak_values.txt file"""
file_name = os.path.join(os.getcwd(), "VmPeak_values.txt")
print "[Pilot VmPeak] Creating file: %s" % (file_name)
try:
f = open(file_name, "w")
except OSError, e:
print "[Pilot VmPeak] Could not create %s" % (file_name)
else:
s = "%d,%d,%d" % (vmem_peak_max, vmem_mean_max, rss_mean_max)
f.write(s)
f.close()
print "[Pilot VmPeak] Wrote values to file %s" % (file_name)
# main function
if __name__ == "__main__":
try:
import PerfMonComps.PMonSD
except Exception, e:
print "Failed to import PerfMonComps.PMonSD: %s" % (e)
print "Aborting VmPeak script"
else:
vmem_peak_max, vmem_mean_max, rss_mean_max = processFiles()
if vmem_peak_max == 0 and vmem_mean_max == 0 and rss_mean_max == 0:
print "[Pilot VmPeak] All VmPeak and RSS values zero, will not create VmPeak values file"
else:
print "[Pilot VmPeak] vmem_peak_max = %d, vmem_mean_max = %d, rss_mean_max = %d" % (vmem_peak_max, vmem_mean_max, rss_mean_max)
# create the VmPeak_values.txt file
dumpValues(vmem_peak_max, vmem_mean_max, rss_mean_max)
print "[Pilot VmPeak] Done"
| apache-2.0 | -2,558,018,875,590,486,000 | 38.238636 | 145 | 0.593687 | false | 3.215084 | false | false | false |
gdikov/vae-playground | third_party/ite/cost/base_a.py | 1 | 19263 | """ Base association measure estimators. """
from numpy import mean, prod, triu, ones, dot, sum, maximum, all
from scipy.special import binom
from ite.cost.x_initialization import InitX
from ite.cost.x_verification import VerOneDSubspaces, VerCompSubspaceDims
from ite.shared import copula_transformation
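# Minimal usage sketch for the estimators below (the data y is illustrative):
#   import numpy as np
#   import ite
#   y = np.random.rand(1000, 3)  # 1000 samples, 3 one-dimensional subspaces
#   a = ite.cost.BASpearman1().estimation(y)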
class BASpearman1(InitX, VerOneDSubspaces, VerCompSubspaceDims):
""" Estimator of the first multivariate extension of Spearman's rho.
Initialization is inherited from 'InitX', verification capabilities
come from 'VerOneDSubspaces' and 'VerCompSubspaceDims' (see
'ite.cost.x_initialization.py', 'ite.cost.x_verification.py').
Examples
--------
>>> import ite
>>> co = ite.cost.BASpearman1()
"""
def estimation(self, y, ds=None):
""" Estimate the first multivariate extension of Spearman's rho.
Parameters
----------
y : (number of samples, dimension)-ndarray
One row of y corresponds to one sample.
ds : int vector, vector of ones
ds[i] = 1 (for all i): the i^th subspace is one-dimensional.
If ds is not given (ds=None), the vector of ones [ds =
ones(y.shape[1],dtype='int')] is emulated inside the function.
Returns
-------
a : float
Estimated first multivariate extension of Spearman's rho.
References
----------
Friedrich Shmid, Rafael Schmidt, Thomas Blumentritt, Sandra
Gaiser, and Martin Ruppert. Copula Theory and Its Applications,
Chapter Copula based Measures of Multivariate Association. Lecture
Notes in Statistics. Springer, 2010.
Friedrich Schmid and Rafael Schmidt. Multivariate extensions of
Spearman's rho and related statistics. Statistics & Probability
Letters, 77:407-416, 2007.
Roger B. Nelsen. Nonparametric measures of multivariate
association. Lecture Notes-Monograph Series, Distributions with
Fixed Marginals and Related Topics, 28:223-232, 1996.
Edward F. Wolff. N-dimensional measures of dependence.
Stochastica, 4:175-188, 1980.
C. Spearman. The proof and measurement of association between two
things. The American Journal of Psychology, 15:72-101, 1904.
Examples
--------
a = co.estimation(y,ds)
"""
if ds is None: # emulate 'ds = vector of ones'
ds = ones(y.shape[1], dtype='int')
# verification:
self.verification_compatible_subspace_dimensions(y, ds)
self.verification_one_dimensional_subspaces(ds)
dim = y.shape[1] # dimension
u = copula_transformation(y)
h = (dim + 1) / (2**dim - (dim + 1)) # h_rho(dim)
a = h * (2**dim * mean(prod(1 - u, axis=1)) - 1)
return a
class BASpearman2(InitX, VerOneDSubspaces, VerCompSubspaceDims):
""" Estimator of the second multivariate extension of Spearman's rho.
Initialization is inherited from 'InitX', verification capabilities
come from 'VerOneDSubspaces' and 'VerCompSubspaceDims' (see
'ite.cost.x_initialization.py', 'ite.cost.x_verification.py').
Examples
--------
>>> import ite
>>> co = ite.cost.BASpearman2()
"""
def estimation(self, y, ds=None):
""" Estimate the second multivariate extension of Spearman's rho.
Parameters
----------
y : (number of samples, dimension)-ndarray
One row of y corresponds to one sample.
ds : int vector, vector of ones
ds[i] = 1 (for all i): the i^th subspace is one-dimensional.
If ds is not given (ds=None), the vector of ones [ds =
ones(y.shape[1],dtype='int')] is emulated inside the function.
Returns
-------
a : float
Estimated second multivariate extension of Spearman's rho.
References
----------
Friedrich Shmid, Rafael Schmidt, Thomas Blumentritt, Sandra
Gaiser, and Martin Ruppert. Copula Theory and Its Applications,
Chapter Copula based Measures of Multivariate Association. Lecture
Notes in Statistics. Springer, 2010.
Friedrich Schmid and Rafael Schmidt. Multivariate extensions of
Spearman's rho and related statistics. Statistics & Probability
Letters, 77:407-416, 2007.
Roger B. Nelsen. Nonparametric measures of multivariate
association. Lecture Notes-Monograph Series, Distributions with
Fixed Marginals and Related Topics, 28:223-232, 1996.
Harry Joe. Multivariate concordance. Journal of Multivariate
Analysis, 35:12-30, 1990.
C. Spearman. The proof and measurement of association between two
things. The American Journal of Psychology, 15:72-101, 1904.
Examples
--------
a = co.estimation(y,ds)
"""
if ds is None: # emulate 'ds = vector of ones'
ds = ones(y.shape[1], dtype='int')
# verification:
self.verification_compatible_subspace_dimensions(y, ds)
self.verification_one_dimensional_subspaces(ds)
dim = y.shape[1] # dimension
u = copula_transformation(y)
h = (dim + 1) / (2**dim - (dim + 1)) # h_rho(dim)
a = h * (2**dim * mean(prod(u, axis=1)) - 1)
return a
class BASpearman3(InitX, VerOneDSubspaces, VerCompSubspaceDims):
""" Estimator of the third multivariate extension of Spearman's rho.
Initialization is inherited from 'InitX', verification capabilities
come from 'VerOneDSubspaces' and 'VerCompSubspaceDims' (see
'ite.cost.x_initialization.py', 'ite.cost.x_verification.py').
Examples
--------
>>> import ite
>>> co = ite.cost.BASpearman3()
"""
def estimation(self, y, ds=None):
""" Estimate the third multivariate extension of Spearman's rho.
Parameters
----------
y : (number of samples, dimension)-ndarray
One row of y corresponds to one sample.
ds : int vector, vector of ones
ds[i] = 1 (for all i): the i^th subspace is one-dimensional.
If ds is not given (ds=None), the vector of ones [ds =
ones(y.shape[1],dtype='int')] is emulated inside the function.
Returns
-------
a : float
Estimated third multivariate extension of Spearman's rho.
References
----------
Friedrich Shmid, Rafael Schmidt, Thomas Blumentritt, Sandra
Gaiser, and Martin Ruppert. Copula Theory and Its Applications,
Chapter Copula based Measures of Multivariate Association. Lecture
Notes in Statistics. Springer, 2010.
Roger B. Nelsen. An Introduction to Copulas (Springer Series in
Statistics). Springer, 2006.
Roger B. Nelsen. Distributions with Given Marginals and
Statistical Modelling, chapter Concordance and copulas: A survey,
pages 169-178. Kluwer Academic Publishers, Dordrecht, 2002.
C. Spearman. The proof and measurement of association between two
things. The American Journal of Psychology, 15:72-101, 1904.
Examples
--------
a = co.estimation(y,ds)
"""
if ds is None: # emulate 'ds = vector of ones'
ds = ones(y.shape[1], dtype='int')
# verification:
self.verification_compatible_subspace_dimensions(y, ds)
self.verification_one_dimensional_subspaces(ds)
dim = y.shape[1] # dimension
u = copula_transformation(y)
h = (dim + 1) / (2**dim - (dim + 1)) # h_rho(d)
a1 = h * (2**dim * mean(prod(1 - u, axis=1)) - 1)
a2 = h * (2**dim * mean(prod(u, axis=1)) - 1)
a = (a1 + a2) / 2
return a
class BASpearman4(InitX, VerOneDSubspaces, VerCompSubspaceDims):
""" Estimator of the fourth multivariate extension of Spearman's rho.
Initialization is inherited from 'InitX', verification capabilities
come from 'VerOneDSubspaces' and 'VerCompSubspaceDims'; (see
'ite.cost.x_initialization.py', 'ite.cost.x_verification.py').
Examples
--------
>>> import ite
>>> co = ite.cost.BASpearman4()
"""
def estimation(self, y, ds=None):
""" Estimate the fourth multivariate extension of Spearman's rho.
Parameters
----------
y : (number of samples, dimension)-ndarray
One row of y corresponds to one sample.
ds : int vector, vector of ones
ds[i] = 1 (for all i): the i^th subspace is one-dimensional.
If ds is not given (ds=None), the vector of ones [ds =
ones(y.shape[1],dtype='int')] is emulated inside the function.
Returns
-------
a : float
Estimated fourth multivariate extension of Spearman's rho.
References
----------
Friedrich Shmid, Rafael Schmidt, Thomas Blumentritt, Sandra
Gaiser, and Martin Ruppert. Copula Theory and Its Applications,
Chapter Copula based Measures of Multivariate Association. Lecture
Notes in Statistics. Springer, 2010.
Friedrich Schmid and Rafael Schmidt. Multivariate extensions of
Spearman's rho and related statistics. Statistics & Probability
Letters, 77:407-416, 2007.
Maurice G. Kendall. Rank correlation methods. London, Griffin,
1970.
C. Spearman. The proof and measurement of association between two
things. The American Journal of Psychology, 15:72-101, 1904.
Examples
--------
a = co.estimation(y,ds)
"""
if ds is None: # emulate 'ds = vector of ones'
ds = ones(y.shape[1], dtype='int')
# verification:
self.verification_compatible_subspace_dimensions(y, ds)
self.verification_one_dimensional_subspaces(ds)
num_of_samples, dim = y.shape # number of samples, dimension
u = copula_transformation(y)
m_triu = triu(ones((dim, dim)), 1) # upper triangular mask
b = binom(dim, 2)
a = 12 * sum(dot((1 - u).T, (1 - u)) * m_triu) /\
(b * num_of_samples) - 3
return a
class BASpearmanCondLT(InitX, VerOneDSubspaces, VerCompSubspaceDims):
""" Estimate multivariate conditional version of Spearman's rho.
The measure weights the lower tail of the copula.
Partial initialization comes from 'InitX'; verification capabilities
are inherited from 'VerOneDSubspaces' and 'VerCompSubspaceDims' (see
'ite.cost.x_initialization.py', 'ite.cost.x_verification.py').
"""
def __init__(self, mult=True, p=0.5):
""" Initialize the estimator.
Parameters
----------
mult : bool, optional
'True': multiplicative constant relevant (needed) in the
estimation. 'False': estimation up to 'proportionality'.
(default is True)
p : float, 0<p<=1, optional
(default is 0.5)
Examples
--------
>>> import ite
>>> co1 = ite.cost.BASpearmanCondLT()
>>> co2 = ite.cost.BASpearmanCondLT(p=0.4)
"""
# initialize with 'InitX':
super().__init__(mult=mult)
# p:
self.p = p
def estimation(self, y, ds=None):
""" Estimate multivariate conditional version of Spearman's rho.
Parameters
----------
y : (number of samples, dimension)-ndarray
One row of y corresponds to one sample.
ds : int vector, vector of ones
ds[i] = 1 (for all i): the i^th subspace is one-dimensional.
If ds is not given (ds=None), the vector of ones [ds =
ones(y.shape[1],dtype='int')] is emulated inside the function.
Returns
-------
a : float
Estimated multivariate conditional version of Spearman's rho.
References
----------
Friedrich Schmid and Rafael Schmidt. Multivariate conditional
versions of Spearman's rho and related measures of tail dependence.
Journal of Multivariate Analysis, 98:1123-1140, 2007.
C. Spearman. The proof and measurement of association between two
things. The American Journal of Psychology, 15:72-101, 1904.
Examples
--------
a = co.estimation(y,ds)
"""
if ds is None: # emulate 'ds = vector of ones'
ds = ones(y.shape[1], dtype='int')
# verification:
self.verification_compatible_subspace_dimensions(y, ds)
self.verification_one_dimensional_subspaces(ds)
num_of_samples, dim = y.shape # number of samples, dimension
u = copula_transformation(y)
c1 = (self.p**2 / 2)**dim
c2 = self.p**(dim + 1) / (dim + 1)
a = (mean(prod(maximum(self.p - u, 0), axis=1)) - c1) / (c2 - c1)
return a
class BASpearmanCondUT(InitX, VerOneDSubspaces, VerCompSubspaceDims):
""" Estimate multivariate conditional version of Spearman's rho.
The measure weights the upper tail of the copula.
Partial initialization comes from 'InitX'; verification capabilities
are inherited from 'VerOneDSubspaces' and 'VerCompSubspaceDims' (see
'ite.cost.x_initialization.py', 'ite.cost.x_verification.py').
"""
def __init__(self, mult=True, p=0.5):
""" Initialize the estimator.
Parameters
----------
mult : bool, optional
'True': multiplicative constant relevant (needed) in the
estimation. 'False': estimation up to 'proportionality'.
(default is True)
p : float, 0<p<=1, optional
(default is 0.5)
Examples
--------
>>> import ite
>>> co1 = ite.cost.BASpearmanCondUT()
>>> co2 = ite.cost.BASpearmanCondUT(p=0.4)
"""
# initialize with 'InitX':
super().__init__(mult=mult)
# p:
self.p = p
def estimation(self, y, ds=None):
""" Estimate multivariate conditional version of Spearman's rho.
Parameters
----------
y : (number of samples, dimension)-ndarray
One row of y corresponds to one sample.
ds : int vector, vector of ones
ds[i] = 1 (for all i): the i^th subspace is one-dimensional.
If ds is not given (ds=None), the vector of ones [ds =
ones(y.shape[1],dtype='int')] is emulated inside the function.
Returns
-------
a : float
Estimated multivariate conditional version of Spearman's rho.
References
----------
Friedrich Schmid and Rafael Schmidt. Multivariate conditional
versions of Spearman's rho and related measures of tail
dependence. Journal of Multivariate Analysis, 98:1123-1140, 2007.
C. Spearman. The proof and measurement of association between two
things. The American Journal of Psychology, 15:72-101, 1904.
Examples
--------
a = co.estimation(y,ds)
"""
if ds is None: # emulate 'ds = vector of ones'
ds = ones(y.shape[1], dtype='int')
# verification:
self.verification_compatible_subspace_dimensions(y, ds)
self.verification_one_dimensional_subspaces(ds)
num_of_samples, dim = y.shape # number of samples, dimension
u = copula_transformation(y)
c = mean(prod(1 - maximum(u, 1 - self.p), axis=1))
c1 = (self.p * (2 - self.p) / 2)**dim
c2 = self.p**dim * (dim + 1 - self.p * dim) / (dim + 1)
a = (c - c1) / (c2 - c1)
return a
class BABlomqvist(InitX, VerOneDSubspaces, VerCompSubspaceDims):
""" Estimator of the multivariate extension of Blomqvist's beta.
Blomqvist's beta is also known as the medial correlation coefficient.
Initialization is inherited from 'InitX', verification capabilities
come from 'VerOneDSubspaces' and 'VerCompSubspaceDims' (see
'ite.cost.x_initialization.py', 'ite.cost.x_verification.py').
Examples
--------
>>> import ite
>>> co = ite.cost.BABlomqvist()
"""
def estimation(self, y, ds=None):
""" Estimate multivariate extension of Blomqvist's beta.
Parameters
----------
y : (number of samples, dimension)-ndarray
One row of y corresponds to one sample.
ds : int vector, vector of ones
ds[i] = 1 (for all i): the i^th subspace is one-dimensional.
If ds is not given (ds=None), the vector of ones [ds =
ones(y.shape[1],dtype='int')] is emulated inside the function.
Returns
-------
a : float
Estimated multivariate extension of Blomqvist's beta.
References
----------
Friedrich Schmid, Rafael Schmidt, Thomas Blumentritt, Sandra
Gaiser, and Martin Ruppert. Copula Theory and Its Applications,
Chapter Copula based Measures of Multivariate Association. Lecture
Notes in Statistics. Springer, 2010. (multidimensional case,
len(ds)>=2)
Manuel Ubeda-Flores. Multivariate versions of Blomqvist's beta and
Spearman's footrule. Annals of the Institute of Statistical
Mathematics, 57:781-788, 2005.
Nils Blomqvist. On a measure of dependence between two random
variables. The Annals of Mathematical Statistics, 21:593-600, 1950.
(2D case, statistical properties)
Frederick Mosteller. On some useful ''inefficient'' statistics.
Annals of Mathematical Statistics, 17:377--408, 1946. (2D case,
def)
Examples
--------
a = co.estimation(y,ds)
"""
if ds is None: # emulate 'ds = vector of ones'
ds = ones(y.shape[1], dtype='int')
# verification:
self.verification_compatible_subspace_dimensions(y, ds)
self.verification_one_dimensional_subspaces(ds)
num_of_samples, dim = y.shape # number of samples, dimension
u = copula_transformation(y)
h = 2**(dim - 1) / (2**(dim - 1) - 1) # h(dim)
c1 = mean(all(u <= 1/2, axis=1)) # C(1/2)
c2 = mean(all(u > 1/2, axis=1)) # \bar{C}(1/2)
a = h * (c1 + c2 - 2**(1 - dim))
return a
| mit | 8,382,078,373,014,410,000 | 33.708108 | 79 | 0.581633 | false | 3.928819 | false | false | false |
ChinaQuants/Engine | Examples/Example_1/xlwings/excel_view.py | 2 | 1929 | import xlwings as xw
from xlwings import Workbook, Sheet, Range, Chart
import time
import datetime
from subprocess import call
import matplotlib.pyplot as plt
# connect to the active workbook
#wb = Workbook('run.xlsm')
wb = Workbook.active()
# log status
Range('B8').value = 'running upload ...'
file = 'Output/exposure_trade_Swap_20y.csv'
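# The CSV is expected to have a header row with at least 'Time', 'EPE' and 'ENE'
# columns (these names are used by the DictReader below).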
# load data into arrays and cells
x = []
y = []
z = []
line = 2
import csv
with open(file) as csvfile:
reader = csv.DictReader(csvfile, delimiter=',')
Range('H1').value = 'Time'
Range('I1').value = 'EPE'
Range('J1').value = 'ENE'
for row in reader:
x.append(float(row['Time']))
y.append(float(row['EPE']))
z.append(float(row['ENE']))
Range('H' + str(line)).value = float(row['Time'])
Range('I' + str(line)).value = float(row['EPE'])
Range('J' + str(line)).value = float(row['ENE'])
line = line + 1
# add chart
cellrange = str("H1:J") + str(line)
chart = xw.Chart.add(source_data=xw.Range(cellrange).table)
chart.name = 'chart'
chart.chart_type = xw.ChartType.xlLine
chart.top = 200
chart.left = 0
chart.height = 250
chart.width = 350
chart.title = 'Exposure Evolution'
chart.xlabel = 'Time / Years'
chart.ylabel = 'Exposure'
# log status
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
Range('B8').value = st + " Upload completed"
# add same plot again using matplotlib
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title("Exposure Evolution")
ax.set_xlabel("Time / Years")
ax.set_ylabel("Exposure")
ax.plot(x,y, label="EPE")
ax.plot(x,z, label="ENE")
legend = ax.legend(loc="upper right")
plot = xw.Plot(fig)
plot.show('plot', left=xw.Range('A33').left, top=xw.Range('A33').top, width=350, height=250)
# log status
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
xw.Range('B8').value = st + " Upload completed"
| bsd-3-clause | 6,174,453,772,711,315,000 | 25.791667 | 92 | 0.652151 | false | 2.840943 | false | false | false |
KellenSunderland/sockeye | sockeye/data_io.py | 1 | 25786 | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Implements data iterators and I/O related functions for sequence-to-sequence models.
"""
import bisect
import gzip
import logging
import pickle
import random
from collections import OrderedDict
from typing import Dict, Iterator, Iterable, List, NamedTuple, Optional, Tuple
import mxnet as mx
import numpy as np
from sockeye.utils import check_condition
from . import config
from . import constants as C
logger = logging.getLogger(__name__)
def define_buckets(max_seq_len: int, step=10) -> List[int]:
"""
Returns a list of integers defining bucket boundaries.
Bucket boundaries are created according to the following policy:
We generate buckets with a step size of step until the final bucket fits max_seq_len.
We then limit that bucket to max_seq_len (difference between semi-final and final bucket may be less than step).
:param max_seq_len: Maximum bucket size.
:param step: Distance between buckets.
:return: List of bucket sizes.
"""
buckets = [bucket_len for bucket_len in range(step, max_seq_len + step, step)]
buckets[-1] = max_seq_len
return buckets
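# Minimal illustration (added; the values are an assumed example, not taken from the
# original module or its tests): with max_seq_len=47 and the default step of 10,
#   >>> define_buckets(47)
#   [10, 20, 30, 40, 47]
# i.e. regular steps of 10, with the last bucket clipped to max_seq_len.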
def define_parallel_buckets(max_seq_len_source: int,
max_seq_len_target: int,
bucket_width=10,
length_ratio=1.0) -> List[Tuple[int, int]]:
"""
Returns (source, target) buckets up to (max_seq_len_source, max_seq_len_target). The longer side of the data uses
steps of bucket_width while the shorter side uses steps scaled down by the average target/source length ratio. If
one side reaches its max_seq_len before the other, width of extra buckets on that side is fixed to that max_seq_len.
:param max_seq_len_source: Maximum source bucket size.
:param max_seq_len_target: Maximum target bucket size.
:param bucket_width: Width of buckets on longer side.
:param length_ratio: Length ratio of data (target/source).
"""
source_step_size = bucket_width
target_step_size = bucket_width
if length_ratio >= 1.0:
# target side is longer -> scale source
source_step_size = max(1, int(bucket_width / length_ratio))
else:
# source side is longer, -> scale target
target_step_size = max(1, int(bucket_width * length_ratio))
source_buckets = define_buckets(max_seq_len_source, step=source_step_size)
target_buckets = define_buckets(max_seq_len_target, step=target_step_size)
# Extra buckets
if len(source_buckets) < len(target_buckets):
source_buckets += [source_buckets[-1] for _ in range(len(target_buckets) - len(source_buckets))]
elif len(target_buckets) < len(source_buckets):
target_buckets += [target_buckets[-1] for _ in range(len(source_buckets) - len(target_buckets))]
# minimum bucket size is 2 (as we add BOS symbol to target side)
source_buckets = [max(2, b) for b in source_buckets]
target_buckets = [max(2, b) for b in target_buckets]
parallel_buckets = list(zip(source_buckets, target_buckets))
# deduplicate for return
return list(OrderedDict.fromkeys(parallel_buckets))
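# Hedged example (added for illustration; the numbers are assumptions). With equal max
# lengths and length_ratio=1.0 both sides use the same step, so the buckets pair up
# one-to-one:
#   >>> define_parallel_buckets(30, 30, bucket_width=10, length_ratio=1.0)
#   [(10, 10), (20, 20), (30, 30)]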
def get_bucket(seq_len: int, buckets: List[int]) -> Optional[int]:
"""
Given sequence length and a list of buckets, return corresponding bucket.
:param seq_len: Sequence length.
:param buckets: List of buckets.
:return: Chosen bucket.
"""
bucket_idx = bisect.bisect_left(buckets, seq_len)
if bucket_idx == len(buckets):
return None
return buckets[bucket_idx]
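# Illustration (added; assumed inputs): bisect_left picks the smallest bucket that fits,
# and None signals that the sequence is longer than the largest bucket:
#   >>> get_bucket(13, [10, 20, 30])
#   20
#   >>> get_bucket(31, [10, 20, 30])  # no bucket fits -> returns None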
def read_parallel_corpus(data_source: str,
data_target: str,
vocab_source: Dict[str, int],
vocab_target: Dict[str, int]) -> Tuple[List[List[int]], List[List[int]]]:
"""
Loads source and target data, making sure they have the same length.
:param data_source: Path to source training data.
:param data_target: Path to target training data.
:param vocab_source: Source vocabulary.
:param vocab_target: Target vocabulary.
:return: Tuple of (source sentences, target sentences).
"""
source_sentences = read_sentences(data_source, vocab_source, add_bos=False)
target_sentences = read_sentences(data_target, vocab_target, add_bos=True)
check_condition(len(source_sentences) == len(target_sentences),
"Number of source sentences does not match number of target sentences")
return source_sentences, target_sentences
def get_training_data_iters(source: str, target: str,
validation_source: str, validation_target: str,
vocab_source: Dict[str, int], vocab_target: Dict[str, int],
batch_size: int,
fill_up: str,
max_seq_len_source: int,
max_seq_len_target: int,
bucketing: bool,
bucket_width: int) -> Tuple['ParallelBucketSentenceIter', 'ParallelBucketSentenceIter']:
"""
Returns data iterators for training and validation data.
:param source: Path to source training data.
:param target: Path to target training data.
:param validation_source: Path to source validation data.
:param validation_target: Path to target validation data.
:param vocab_source: Source vocabulary.
:param vocab_target: Target vocabulary.
:param batch_size: Batch size.
:param fill_up: Fill-up strategy for buckets.
:param max_seq_len_source: Maximum source sequence length.
:param max_seq_len_target: Maximum target sequence length.
:param bucketing: Whether to use bucketing.
:param bucket_width: Size of buckets.
:return: Tuple of (training data iterator, validation data iterator).
"""
logger.info("Creating train data iterator")
train_source_sentences, train_target_sentences = read_parallel_corpus(source,
target,
vocab_source,
vocab_target)
length_ratio = sum(len(t) / float(len(s)) for t, s in zip(train_target_sentences, train_source_sentences)) / len(
train_target_sentences)
logger.info("Average training target/source length ratio: %.2f", length_ratio)
# define buckets
buckets = define_parallel_buckets(max_seq_len_source,
max_seq_len_target,
bucket_width,
length_ratio) if bucketing else [
(max_seq_len_source, max_seq_len_target)]
train_iter = ParallelBucketSentenceIter(train_source_sentences,
train_target_sentences,
buckets,
batch_size,
vocab_target[C.EOS_SYMBOL],
C.PAD_ID,
vocab_target[C.UNK_SYMBOL],
fill_up=fill_up)
logger.info("Creating validation data iterator")
val_source_sentences, val_target_sentences = read_parallel_corpus(validation_source,
validation_target,
vocab_source,
vocab_target)
val_iter = ParallelBucketSentenceIter(val_source_sentences,
val_target_sentences,
buckets,
batch_size,
vocab_target[C.EOS_SYMBOL],
C.PAD_ID,
vocab_target[C.UNK_SYMBOL],
fill_up=fill_up)
return train_iter, val_iter
class DataConfig(config.Config):
"""
Stores data paths from training.
"""
def __init__(self,
source: str,
target: str,
validation_source: str,
validation_target: str,
vocab_source: str,
vocab_target: str) -> None:
super().__init__()
self.source = source
self.target = target
self.validation_source = validation_source
self.validation_target = validation_target
self.vocab_source = vocab_source
self.vocab_target = vocab_target
def smart_open(filename: str, mode="rt", ftype="auto", errors='replace'):
"""
Returns a file descriptor for filename with UTF-8 encoding.
If mode is "rt", file is opened read-only.
If ftype is "auto", uses gzip iff filename endswith .gz.
If ftype is {"gzip","gz"}, uses gzip.
Note: encoding error handling defaults to "replace"
:param filename: The filename to open.
:param mode: Reader mode.
:param ftype: File type. If 'auto' checks filename suffix for gz to try gzip.open
:param errors: Encoding error handling during reading. Defaults to 'replace'
:return: File descriptor
"""
if ftype == 'gzip' or ftype == 'gz' or (ftype == 'auto' and filename.endswith(".gz")):
return gzip.open(filename, mode=mode, encoding='utf-8', errors=errors)
else:
return open(filename, mode=mode, encoding='utf-8', errors=errors)
def read_content(path: str, limit=None) -> Iterator[List[str]]:
"""
Returns a list of tokens for each line in path up to a limit.
:param path: Path to files containing sentences.
:param limit: How many lines to read from path.
:return: Iterator over lists of words.
"""
with smart_open(path) as indata:
for i, line in enumerate(indata):
if limit is not None and i == limit:
break
yield list(get_tokens(line))
def get_tokens(line: str) -> Iterator[str]:
"""
Yields tokens from input string.
:param line: Input string.
:return: Iterator over tokens.
"""
for token in line.rstrip().split():
if len(token) > 0:
yield token
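# Illustration (added; the input string is an assumed example): whitespace is stripped
# and empty tokens are skipped:
#   >>> list(get_tokens("  the quick  fox \n"))
#   ['the', 'quick', 'fox']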
def tokens2ids(tokens: Iterable[str], vocab: Dict[str, int]) -> List[int]:
"""
Returns sequence of ids given a sequence of tokens and vocab.
:param tokens: List of tokens.
:param vocab: Vocabulary (containing UNK symbol).
:return: List of word ids.
"""
return [vocab.get(w, vocab[C.UNK_SYMBOL]) for w in tokens]
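# Illustration (added; the vocabulary below is a hypothetical toy example and assumes
# C.UNK_SYMBOL == '<unk>', which is defined elsewhere in the package):
#   >>> vocab = {'<unk>': 0, 'the': 4, 'fox': 7}
#   >>> tokens2ids(['the', 'quick', 'fox'], vocab)
#   [4, 0, 7]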
def read_sentences(path: str, vocab: Dict[str, int], add_bos=False, limit=None) -> List[List[int]]:
"""
Reads sentences from path and creates word id sentences.
:param path: Path to read data from.
:param vocab: Vocabulary mapping.
:param add_bos: Whether to add Beginning-Of-Sentence (BOS) symbol.
:param limit: Read limit.
:return: List of integer sequences.
"""
    assert C.UNK_SYMBOL in vocab
assert vocab[C.PAD_SYMBOL] == C.PAD_ID
assert C.BOS_SYMBOL in vocab
assert C.EOS_SYMBOL in vocab
sentences = []
for sentence_tokens in read_content(path, limit):
sentence = tokens2ids(sentence_tokens, vocab)
check_condition(sentence, "Empty sentence in file %s" % path)
if add_bos:
sentence.insert(0, vocab[C.BOS_SYMBOL])
sentences.append(sentence)
logger.info("%d sentences loaded from '%s'", len(sentences), path)
return sentences
def get_default_bucket_key(buckets: List[Tuple[int, int]]) -> Tuple[int, int]:
"""
Returns the default bucket from a list of buckets, i.e. the largest bucket.
:param buckets: List of buckets.
:return: The largest bucket in the list.
"""
return max(buckets)
def get_parallel_bucket(buckets: List[Tuple[int, int]],
length_source: int,
length_target: int) -> Optional[Tuple[int, Tuple[int, int]]]:
"""
Returns bucket index and bucket from a list of buckets, given source and target length.
Returns (None, None) if no bucket fits.
:param buckets: List of buckets.
:param length_source: Length of source sequence.
:param length_target: Length of target sequence.
:return: Tuple of (bucket index, bucket), or (None, None) if not fitting.
"""
bucket = None, None
for j, (source_bkt, target_bkt) in enumerate(buckets):
if source_bkt >= length_source and target_bkt >= length_target:
bucket = j, (source_bkt, target_bkt)
break
return bucket
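# Illustration (added; assumed bucket list): the first bucket that fits both lengths wins:
#   >>> get_parallel_bucket([(10, 10), (20, 20), (30, 30)], 12, 8)
#   (1, (20, 20))
#   >>> get_parallel_bucket([(10, 10)], 15, 5)
#   (None, None)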
# TODO: consider more memory-efficient data reading (load from disk on demand)
# TODO: consider using HDF5 format for language data
class ParallelBucketSentenceIter(mx.io.DataIter):
"""
A Bucket sentence iterator for parallel data. Randomly shuffles the data after every call to reset().
Data is stored in NDArrays for each epoch for fast indexing during iteration.
:param source_sentences: List of source sentences (integer-coded).
:param target_sentences: List of target sentences (integer-coded).
:param buckets: List of buckets.
    :param batch_size: Batch size of generated data batches. Incomplete batches are discarded if fill_up == None,
           or filled up according to the fill_up strategy.
    :param fill_up: If not None, fill up bucket data to a multiple of batch_size to avoid discarding incomplete
           batches for each bucket. If set to 'replicate', sample examples from the bucket and use them to fill up.
:param eos_id: Word id for end-of-sentence.
:param pad_id: Word id for padding symbols.
:param unk_id: Word id for unknown symbols.
:param dtype: Data type of generated NDArrays.
"""
def __init__(self,
source_sentences: List[List[int]],
target_sentences: List[List[int]],
buckets: List[Tuple[int, int]],
batch_size: int,
eos_id: int,
pad_id: int,
unk_id: int,
fill_up: Optional[str] = None,
source_data_name=C.SOURCE_NAME,
source_data_length_name=C.SOURCE_LENGTH_NAME,
target_data_name=C.TARGET_NAME,
label_name=C.TARGET_LABEL_NAME,
dtype='float32'):
super(ParallelBucketSentenceIter, self).__init__()
self.buckets = list(buckets)
self.buckets.sort()
self.default_bucket_key = get_default_bucket_key(self.buckets)
self.batch_size = batch_size
self.eos_id = eos_id
self.pad_id = pad_id
self.unk_id = unk_id
self.dtype = dtype
self.source_data_name = source_data_name
self.source_data_length_name = source_data_length_name
self.target_data_name = target_data_name
self.label_name = label_name
self.fill_up = fill_up
# TODO: consider avoiding explicitly creating length and label arrays to save host memory
self.data_source = [[] for _ in self.buckets]
self.data_length = [[] for _ in self.buckets]
self.data_target = [[] for _ in self.buckets]
self.data_label = [[] for _ in self.buckets]
# assign sentence pairs to buckets
self._assign_to_buckets(source_sentences, target_sentences)
# convert to single numpy array for each bucket
self._convert_to_array()
self.provide_data = [
mx.io.DataDesc(name=source_data_name, shape=(batch_size, self.default_bucket_key[0]), layout=C.BATCH_MAJOR),
mx.io.DataDesc(name=source_data_length_name, shape=(batch_size,), layout=C.BATCH_MAJOR),
mx.io.DataDesc(name=target_data_name, shape=(batch_size, self.default_bucket_key[1]), layout=C.BATCH_MAJOR)]
self.provide_label = [
mx.io.DataDesc(name=label_name, shape=(self.batch_size, self.default_bucket_key[1]), layout=C.BATCH_MAJOR)]
self.data_names = [self.source_data_name, self.source_data_length_name, self.target_data_name]
self.label_names = [self.label_name]
# create index tuples (i,j) into buckets: i := bucket index ; j := row index of bucket array
self.idx = []
for i, buck in enumerate(self.data_source):
rest = len(buck) % batch_size
if rest > 0:
logger.info("Discarding %d samples from bucket %s due to incomplete batch", rest, self.buckets[i])
idxs = [(i, j) for j in range(0, len(buck) - batch_size + 1, batch_size)]
self.idx.extend(idxs)
self.curr_idx = 0
# holds NDArrays
self.indices = [] # This will define how the data arrays will be organized
self.nd_source = []
self.nd_length = []
self.nd_target = []
self.nd_label = []
self.reset()
def _assign_to_buckets(self, source_sentences, target_sentences):
ndiscard = 0
tokens_source = 0
tokens_target = 0
num_of_unks_source = 0
num_of_unks_target = 0
for source, target in zip(source_sentences, target_sentences):
tokens_source += len(source)
tokens_target += len(target)
num_of_unks_source += source.count(self.unk_id)
num_of_unks_target += target.count(self.unk_id)
buck_idx, buck = get_parallel_bucket(self.buckets, len(source), len(target))
if buck is None:
ndiscard += 1
continue
buff_source = np.full((buck[0],), self.pad_id, dtype=self.dtype)
buff_target = np.full((buck[1],), self.pad_id, dtype=self.dtype)
buff_label = np.full((buck[1],), self.pad_id, dtype=self.dtype)
buff_source[:len(source)] = source
buff_target[:len(target)] = target
buff_label[:len(target)] = target[1:] + [self.eos_id]
self.data_source[buck_idx].append(buff_source)
self.data_length[buck_idx].append(len(source))
self.data_target[buck_idx].append(buff_target)
self.data_label[buck_idx].append(buff_label)
logger.info("Source words: %d", tokens_source)
logger.info("Target words: %d", tokens_target)
logger.info("Vocab coverage source: %.0f%%", (1 - num_of_unks_source / tokens_source) * 100)
logger.info("Vocab coverage target: %.0f%%", (1 - num_of_unks_target / tokens_target) * 100)
        logger.info('Total: {0} samples in {1} buckets'.format(sum(len(b) for b in self.data_source), len(self.buckets)))
nsamples = 0
for bkt, buck in zip(self.buckets, self.data_length):
logger.info("bucket of {0} : {1} samples".format(bkt, len(buck)))
nsamples += len(buck)
check_condition(nsamples > 0, "0 data points available in the data iterator. "
"%d data points have been discarded because they "
"didn't fit into any bucket. Consider increasing "
"the --max-seq-len to fit your data." % ndiscard)
logger.info("%d sentence pairs out of buckets", ndiscard)
logger.info("fill up mode: %s", self.fill_up)
logger.info("")
def _convert_to_array(self):
for i in range(len(self.data_source)):
self.data_source[i] = np.asarray(self.data_source[i], dtype=self.dtype)
self.data_length[i] = np.asarray(self.data_length[i], dtype=self.dtype)
self.data_target[i] = np.asarray(self.data_target[i], dtype=self.dtype)
self.data_label[i] = np.asarray(self.data_label[i], dtype=self.dtype)
n = len(self.data_source[i])
if n % self.batch_size != 0:
buck_shape = self.buckets[i]
rest = self.batch_size - n % self.batch_size
if self.fill_up == 'pad':
raise NotImplementedError
elif self.fill_up == 'replicate':
logger.info(
"Replicating %d random examples from bucket %s to size it to multiple of batch size %d", rest,
buck_shape, self.batch_size)
random_indices = np.random.randint(self.data_source[i].shape[0], size=rest)
self.data_source[i] = np.concatenate((self.data_source[i], self.data_source[i][random_indices, :]),
axis=0)
self.data_length[i] = np.concatenate((self.data_length[i], self.data_length[i][random_indices]),
axis=0)
self.data_target[i] = np.concatenate((self.data_target[i], self.data_target[i][random_indices, :]),
axis=0)
self.data_label[i] = np.concatenate((self.data_label[i], self.data_label[i][random_indices, :]),
axis=0)
def reset(self):
"""
Resets and reshuffles the data.
"""
self.curr_idx = 0
# shuffle indices
random.shuffle(self.idx)
self.nd_source = []
self.nd_length = []
self.nd_target = []
self.nd_label = []
self.indices = []
for i in range(len(self.data_source)):
# shuffle indices within each bucket
self.indices.append(np.random.permutation(len(self.data_source[i])))
self._append_ndarrays(i, self.indices[-1])
def _append_ndarrays(self, bucket: int, shuffled_indices: np.array):
"""
Appends the actual data, selected by the given indices, to the NDArrays
of the appropriate bucket. Use when reshuffling the data.
:param bucket: Current bucket.
:param shuffled_indices: Indices indicating which data to select.
"""
self.nd_source.append(mx.nd.array(self.data_source[bucket].take(shuffled_indices, axis=0), dtype=self.dtype))
self.nd_length.append(mx.nd.array(self.data_length[bucket].take(shuffled_indices, axis=0), dtype=self.dtype))
self.nd_target.append(mx.nd.array(self.data_target[bucket].take(shuffled_indices, axis=0), dtype=self.dtype))
self.nd_label.append(mx.nd.array(self.data_label[bucket].take(shuffled_indices, axis=0), dtype=self.dtype))
def iter_next(self) -> bool:
"""
True if iterator can return another batch
"""
return self.curr_idx != len(self.idx)
def next(self) -> mx.io.DataBatch:
"""
Returns the next batch from the data iterator.
"""
if not self.iter_next():
raise StopIteration
i, j = self.idx[self.curr_idx]
self.curr_idx += 1
source = self.nd_source[i][j:j + self.batch_size]
length = self.nd_length[i][j:j + self.batch_size]
target = self.nd_target[i][j:j + self.batch_size]
data = [source, length, target]
label = [self.nd_label[i][j:j + self.batch_size]]
provide_data = [mx.io.DataDesc(name=n, shape=x.shape, layout=C.BATCH_MAJOR) for n, x in
zip(self.data_names, data)]
provide_label = [mx.io.DataDesc(name=n, shape=x.shape, layout=C.BATCH_MAJOR) for n, x in
zip(self.label_names, label)]
# TODO: num pad examples is not set here if fillup strategy would be padding
return mx.io.DataBatch(data, label,
pad=0, index=None, bucket_key=self.buckets[i],
provide_data=provide_data, provide_label=provide_label)
def save_state(self, fname: str):
"""
Saves the current state of iterator to a file, so that iteration can be
continued. Note that the data is not saved, i.e. the iterator must be
initialized with the same parameters as in the first call.
:param fname: File name to save the information to.
"""
with open(fname, "wb") as fp:
pickle.dump(self.idx, fp)
pickle.dump(self.curr_idx, fp)
np.save(fp, self.indices)
def load_state(self, fname: str):
"""
Loads the state of the iterator from a file.
:param fname: File name to load the information from.
"""
with open(fname, "rb") as fp:
self.idx = pickle.load(fp)
self.curr_idx = pickle.load(fp)
self.indices = np.load(fp)
# Because of how checkpointing is done (pre-fetching the next batch in
# each iteration), curr_idx should be always >= 1
assert self.curr_idx >= 1
# Right after loading the iterator state, next() should be called
self.curr_idx -= 1
self.nd_source = []
self.nd_length = []
self.nd_target = []
self.nd_label = []
for i in range(len(self.data_source)):
self._append_ndarrays(i, self.indices[i])
| apache-2.0 | 6,141,320,080,320,133,000 | 43.003413 | 120 | 0.591406 | false | 3.993495 | false | false | false |
Kazade/NeHe-Website | google_appengine/google/appengine/tools/devappserver2/python/runtime_test.py | 6 | 1591 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for google.appengine.tools.devappserver2.python.runtime."""
import unittest
import google
import mox
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.tools.devappserver2 import runtime_config_pb2
from google.appengine.tools.devappserver2.python import runtime
class SetupStubsTest(unittest.TestCase):
def setUp(self):
self.mox = mox.Mox()
def tearDown(self):
self.mox.UnsetStubs()
def test_setup_stubs(self):
self.mox.StubOutWithMock(remote_api_stub, 'ConfigureRemoteApi')
remote_api_stub.ConfigureRemoteApi('app', '/', mox.IgnoreArg(),
'somehost:12345',
use_remote_datastore=False)
config = runtime_config_pb2.Config()
config.app_id = 'app'
config.api_host = 'somehost'
config.api_port = 12345
self.mox.ReplayAll()
runtime.setup_stubs(config)
self.mox.VerifyAll()
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 1,352,857,910,737,410,600 | 29.596154 | 74 | 0.69956 | false | 3.815348 | true | false | false |
mpirnat/aoc2016 | day04/test.py | 1 | 2286 | #!/usr/bin/env python
import unittest
from day04 import parse_room, make_checksum, is_real_room, sum_of_sectors
from day04 import decrypt_room_name, find_decrypted_room_name
class TestIdentifiesRealRooms(unittest.TestCase):
"""
aaaaa-bbb-z-y-x-123[abxyz] is a real room because the most common letters are a
(5), b (3), and then a tie between x, y, and z, which are listed alphabetically.
a-b-c-d-e-f-g-h-987[abcde] is a real room because although the letters are all
tied (1 of each), the first five are listed alphabetically.
not-a-real-room-404[oarel] is a real room.
totally-real-room-200[decoy] is not.
"""
cases = (
('aaaaa-bbb-z-y-x-123[abxyz]', ('aaaaabbbzyx', 123, 'abxyz'), 'abxyz', True),
('a-b-c-d-e-f-g-h-987[abcde]', ('abcdefgh', 987, 'abcde'), 'abcde', True),
('not-a-real-room-404[oarel]', ('notarealroom', 404, 'oarel'), 'oarel', True),
('totally-real-room-200[decoy]', ('totallyrealroom', 200, 'decoy'),
'loart', False),
)
def test_parses_rooms(self):
for room, parsed, *_ in self.cases:
self.assertEqual(parse_room(room), parsed)
def test_calculates_checksums(self):
for room, parsed, checksum, _ in self.cases:
name, sector, _ = parse_room(room)
self.assertEqual(make_checksum(name), checksum)
def test_identifies_real_rooms(self):
for room, *_, is_real in self.cases:
name, sector, checksum = parse_room(room)
self.assertEqual(is_real_room(name, checksum), is_real)
def test_sums_valid_sectors(self):
rooms = [x[0] for x in self.cases]
self.assertEqual(sum_of_sectors(rooms), 1514)
class TestDecryptingRoomNames(unittest.TestCase):
cases = (
('qzmt-zixmtkozy-ivhz-343[zimth]', 'veryencryptedname'),
)
def test_decrypts_room_names(self):
for room, expected_plaintext in self.cases:
self.assertEqual(decrypt_room_name(room), expected_plaintext)
def test_finds_decrypted_room_names(self):
rooms = ['garbage-nope-123[lol]', 'haystack-57[wtf]'] + \
[x[0] for x in self.cases]
self.assertEqual(find_decrypted_room_name('Very Encrypted', rooms), 343)
if __name__ == '__main__':
unittest.main()
| mit | 7,679,181,208,555,895,000 | 34.169231 | 86 | 0.632546 | false | 3.072581 | true | false | false |
pathfinder14/OpenSAPM | osamp/source.py | 1 | 2026 | import numpy as np
"""
This module describes the source of propagating waves.
A source is an addition to the initial system.
The module contains definitions of several kinds of sources;
the current one is stored in the variable `source`.
Different types:
    Spherical
"""
class Source:
"""This class is responsible for creating external sources of waves"""
def __init__(self, type):
self._type = type
self.coordinates_x = 5
self.coordinates_y = 5
self._source = self._get_source_by_type(type)
# TODO: make return valid value
def _get_source_by_type(self, type):
return 'Temp source value'
def _create_spherical_source(self, grid, dimension):
#TODO delete unnamed constants
if dimension == 2 and len(grid[0][0][0]) == 3:
grid[self.coordinates_x][self.coordinates_y][0] = np.array([10, 0, 0])
grid[self.coordinates_x+1][self.coordinates_y][0] = np.array([5, 0, 0])
grid[self.coordinates_x-1][self.coordinates_y][0] = np.array([5, 0, 0])
grid[self.coordinates_x][self.coordinates_y+1][0] = np.array([5, 0, 0])
grid[self.coordinates_x][self.coordinates_y-1][0] = np.array([5, 0, 0])
grid[self.coordinates_x- 1][self.coordinates_y - 1][0] = np.array([5, 0, 0])
grid[self.coordinates_x + 1][self.coordinates_y - 1][0] = np.array([5, 0, 0])
grid[self.coordinates_x - 1][self.coordinates_y +1][0] = np.array([5, 0, 0])
grid[self.coordinates_x + 1][self.coordinates_y + 1][0] = np.array([5, 0, 0])
elif dimension == 1:
grid[self.coordinates_x][0] = np.array([100, 20])
else:
grid[self.coordinates_x][self.coordinates_y][0] = np.array([10, 0, 0, 0, 0])
return grid
def update_source_in_grid(self, grid, dimension):
return self._create_spherical_source(grid, dimension)
#grid[self.coordinates] = np.array([1,1])#TODO create real source
class SourcesTypes:
def __init__(self):
pass
| mit | 184,709,055,575,299,870 | 36.518519 | 89 | 0.603653 | false | 3.354305 | false | false | false |
praveenv253/sht | sht/sht.py | 1 | 5547 | #!/usr/bin/env python3
"""
Module providing forward and inverse Spherical Harmonic Transforms.
The algorithm followed is that given in the paper:
Zubair Khalid, Rodney A. Kennedy, Jason D. McEwen, ``An Optimal-Dimensionality
Sampling Scheme on the Sphere with Fast Spherical Harmonic Transforms'', IEEE
Transactions on Signal Processing, vol. 62, no. 17, pp. 4597-4610, Sept.1, 2014
DOI: http://dx.doi.org/10.1109/TSP.2014.2337278
arXiv: http://arxiv.org/abs/1403.4661 [cs.IT]
"""
from __future__ import print_function, division
import numpy as np
import scipy.special as spl
import scipy.linalg as la
def _compute_P(thetas):
"""Computes all Pm, to be used for intermediate computations."""
L = thetas.size
P = [] # List of all Pm's
for m in range(L):
ls = np.arange(m, L)
Pm = spl.sph_harm(m, ls[np.newaxis, :], 0, thetas[:, np.newaxis])
P.append(2 * np.pi * Pm)
return P
def sht(f, thetas, phis, intermediates=None):
"""
Computes the spherical harmonic transform of f, for the grid specified by
thetas and phis. This grid must conform to a specific format.
Currently, f can be at most two dimensional. The first dimension will be
transformed.
"""
f = f.copy().astype(complex) # Shouldn't corrupt the original
L = thetas.size
# Check intermediates for P, and compute it if absent
if intermediates is None: # Caller not using intermediates
P = _compute_P(thetas)
elif 'P' in intermediates: # Caller provided P
P = intermediates['P']
else: # Caller wants P
P = _compute_P(thetas)
intermediates['P'] = P
# Compute and store the LU factors of P[m]'s, so that computing the sht
# multiple times is inexpensive
if intermediates is None:
Pm_factors = [la.lu_factor(P[m][m:, :]) for m in range(L)]
elif 'Pm_factors' in intermediates:
Pm_factors = intermediates['Pm_factors']
else:
Pm_factors = [la.lu_factor(P[m][m:, :]) for m in range(L)]
intermediates['Pm_factors'] = Pm_factors
# Initialize g: for L=4, it looks like this when complete:
# 0 * * * * * *
# 0 1 * * * * -1
# 0 1 2 * * -2 -1
# 0 1 2 3 -3 -2 -1
# The numbers here indicate the value of m. A * indicates an unused space.
# The l'th row is the FFT of the ring corresponding to theta[l].
# The m'th column (excluding the unused entries) is essentially gm.
# Thus, gm is valid only when |m| <= l, and is best indexed from -m to m.
g = np.zeros((L, 2 * L - 1) + f.shape[1:], dtype=complex)
    # Initialize result vector
flm = np.zeros(f.shape, dtype=complex)
for m in reversed(range(L)):
# Update g by computing gm
# Perform (2m+1)-point FFT of the m'th phi-ring
# The sampling of f is from -m to m, whereas for the FFT, we need it to
# be from 0 to 2m+1. Hence the ifftshift.
temp = np.fft.fft(np.fft.ifftshift(f[m**2:(m+1)**2], axes=0),
axis=0) * 2 * np.pi / (2*m+1)
# Add this to the main matrix g
g[m, :m+1] = temp[:m+1]
g[m, (2*L-1-m):] = temp[m+1:]
# Solve for fm and fm_neg
fm = la.lu_solve(Pm_factors[m], g[m:, m])
if m > 0:
fm_neg = (-1)**m * la.lu_solve(Pm_factors[m], g[m:, -m])
# Store results
ls = np.arange(m, L)
flm[ls**2 + ls + m] = fm
if m > 0:
flm[ls**2 + ls - m] = fm_neg
# Compute gm for the *other* thetas
gm = np.einsum('i...,ki->k...', fm, P[m][:m, :])
if m > 0:
gm_neg = np.einsum('i...,ki->k...', fm_neg, (-1)**m * P[m][:m, :])
for k in range(m):
# Note: we won't enter this loop if m==0
# Extend dimensions of phi for proper broadcasting with g
ext_indices = ((slice(k**2, (k+1)**2),)
+ (None,) * (len(f.shape) - 1))
f_tilde = ((np.exp(1j * m * phis[ext_indices]) * gm[[k]]
+ np.exp(-1j * m * phis[ext_indices]) * gm_neg[[k]])
/ (2 * np.pi))
f[k**2:(k+1)**2] -= f_tilde
return flm
def isht(flm, thetas, phis, intermediates=None):
"""
Computes the inverse spherical harmonic transform.
"""
L = thetas.size
# Check intermediates for P, and compute it if absent
if intermediates is None: # Caller not using intermediates
P = _compute_P(thetas)
elif 'P' in intermediates: # Caller provided P
P = intermediates['P']
else: # Caller wants P
P = _compute_P(thetas)
intermediates['P'] = P
# Initialize return vector
f = np.zeros(flm.shape, dtype=complex)
for m in range(L):
ls = np.arange(m, L)
gm = np.einsum('i...,ki->k...', flm[ls**2 + ls + m], P[m])
gm_neg = np.einsum('i...,ki->k...', flm[ls**2 + ls - m],
(-1)**m * P[m])
for k in range(L):
# Extend dimensions of phi for proper broadcasting with g
ext_indices = ((slice(k**2, (k+1)**2),)
+ (None,) * (len(f.shape) - 1))
if m == 0:
f_tilde = gm[[k]] / (2 * np.pi)
else:
f_tilde = ((np.exp(-1j * m * phis[ext_indices]) * gm_neg[[k]]
+ np.exp(1j * m * phis[ext_indices]) * gm[[k]])
/ (2 * np.pi))
f[k**2:(k+1)**2] += f_tilde
return f
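# Round-trip sketch (added for illustration; it assumes `thetas` holds the L ring
# colatitudes and `phis` follows the k**2..(k+1)**2 ring layout of the optimal-
# dimensionality sampling scheme cited in the module docstring):
#   >>> intermediates = {}
#   >>> flm = sht(f, thetas, phis, intermediates)       # forward transform
#   >>> f_rec = isht(flm, thetas, phis, intermediates)  # reconstructs f up to numerical error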
| mit | 5,698,515,137,838,680,000 | 35.735099 | 79 | 0.538129 | false | 3.107563 | false | false | false |
voxie-viewer/voxie | scripts/decode-qr.py | 1 | 3165 | #!/usr/bin/python
import sys
import dbus
import mmap
import numpy
import tempfile
import os
import zbar
import Image
import ImageDraw
import voxie
args = voxie.parser.parse_args()
instance = voxie.Voxie(args)
slice = instance.getSlice(args)
dataSet = slice.getDataSet()
data = dataSet.getFilteredData()
with instance.createClient() as client:
plane = slice.dbus_properties.Get('de.uni_stuttgart.Voxie.Slice', 'Plane')
origin = numpy.array(plane[0])
orientation = plane[1]
rotation = voxie.Rotation (orientation)
size = numpy.array(data.get('Size'), dtype='uint64')
dorigin = numpy.array(data.get('Origin'), dtype=numpy.double)
dspacing = numpy.array(data.get('Spacing'), dtype=numpy.double)
posmin = posmax = None
for corner in [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]]:
cpos = numpy.array(corner,dtype=numpy.double) * size
cpos = rotation * (dorigin + dspacing * cpos)
if posmin is None:
posmin = cpos
if posmax is None:
posmax = cpos
posmin = numpy.minimum (posmin, cpos)
posmax = numpy.maximum (posmax, cpos)
#print (cpos)
#print (posmin)
#print (posmax)
pixSize = numpy.min (dspacing)
pos0 = (posmin[0], posmin[1], (rotation.inverse * origin)[2])
pos = rotation * pos0
width = int ((posmax[0] - posmin[0]) / pixSize + 1)
height = int ((posmax[1] - posmin[1]) / pixSize + 1)
options = {}
options['Interpolation'] = 'NearestNeighbor'
options['Interpolation'] = 'Linear'
with instance.createImage(client, (width, height)) as image, image.getDataReadonly() as buffer:
data.dbus.ExtractSlice (pos, orientation, (width, height), (pixSize, pixSize), image.path, options)
image.dbus.UpdateBuffer({})
data = numpy.array (buffer.array)
scanner = zbar.ImageScanner()
allData = ""
def scanImage(image):
global allData
zbarImage = zbar.Image(image.size[0], image.size[1], 'Y800', image.tobytes())
scanner.scan (zbarImage)
print (len(scanner.results))
for result in scanner.results:
data = '%s %s %s "%s"' % (result.type, result.quality, result.location, result.data)
allData = allData + data + "\n"
print (data)
# im2 = image.copy ().convert ('RGB')
# draw = ImageDraw.ImageDraw (im2)
# line = list(result.location)
# line.append (result.location[0])
# draw.line (line, fill='blue')
# im2.save ('/tmp/xx.png')
#print (data.dtype)
data[numpy.isnan(data)] = 0
data -= numpy.min (data)
data /= numpy.max (data)
data *= 255
image = Image.fromarray(data).convert('L')
#image.save ('/tmp/qq.png')
scanImage (image)
image = image.transpose(Image.FLIP_LEFT_RIGHT)
scanImage (image)
import gtk
md = gtk.MessageDialog(None,
gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_INFO,
                       gtk.BUTTONS_CLOSE, "QR codes found:\n" + allData)
md.run()
| mit | -5,471,549,011,903,401,000 | 29.142857 | 107 | 0.593365 | false | 3.331579 | false | false | false |
jinty/zgres | zgres/show.py | 1 | 1988 | import sys
import argparse
from pprint import pformat, pprint
from .config import parse_args
from .deadman import App, willing_replicas
def indented_pprint(obj):
lines = []
for line in pformat(obj).splitlines(True):
lines.append(' ')
lines.append(line)
print(''.join(lines))
def show_cli(argv=sys.argv):
parser = argparse.ArgumentParser(description="Show zgres info")
config = parse_args(parser, argv, config_file='deadman.ini')
if config.has_section('deadman') and config['deadman'].get('plugins', '').strip():
plugins = App(config)._plugins
plugins.initialize()
all_state = list(plugins.dcs_list_state())
my_id = plugins.get_my_id()
my_state = None
for id, state in all_state:
if id == my_id:
my_state = state
break
# if deadman is configured show information about it's state
# HACK, we only need the plugins, really
print('My State:')
print(' ID: {}'.format(my_id))
if my_state is None:
role = 'not registered in zookeeper'
else:
role = my_state.get('replication_role')
print(' Replication role: {}'.format(role))
print('Cluster:')
print(' current master: {}'.format(plugins.dcs_get_lock_owner(name='master')))
print(' database identifier: {}'.format(plugins.dcs_get_database_identifier()))
print(' timeline: {}'.format(pformat(plugins.dcs_get_timeline())))
# willing_replicas is removed!
willing = list(willing_replicas(all_state))
print('\nwilling replicas:')
indented_pprint(willing)
best_replicas = list(plugins.best_replicas(states=willing))
print('\nbest replicas:')
indented_pprint(best_replicas)
print('\nall conn info:')
indented_pprint(list(plugins.dcs_list_conn_info()))
print('\nall state:')
indented_pprint(all_state)
| mit | 6,152,439,788,185,614,000 | 37.980392 | 90 | 0.605131 | false | 3.968064 | true | false | false |
gitaarik/adyengo | adyengo/migrations/0018_auto_20161013_1848.py | 1 | 4315 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-10-13 16:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('adyengo', '0017_auto_20161013_1848'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='payment_method',
field=models.CharField(blank=True, choices=[('mc', 'Master Card'), ('dotpay', 'Dotpay'), ('bankTransfer_DE', 'German Banktransfer'), ('giropay', 'GiroPay'), ('diners', 'Diners Club'), ('visa', 'Visa'), ('bcmc', 'Bancontact card'), ('bankTransfer_IBAN', 'International Bank Transfer (IBAN)'), ('directdebit_NL', 'Direct Debit (Netherlands)'), ('discover', 'Discover'), ('bankTransfer', 'All banktransfers'), ('maestro', 'Maestro'), ('sepadirectdebit', 'SEPA Direct Debit'), ('amex', 'Amex'), ('directEbanking', 'SofortUberweisung'), ('bankTransfer_NL', 'Dutch Banktransfer'), ('ebanking_FI', 'Finnish E-Banking'), ('ideal', 'iDEAL'), ('card', 'All debit and credit cards'), ('elv', 'ELV'), ('paypal', 'PayPal')], max_length=50, null=True),
),
migrations.AlterField(
model_name='session',
name='country_code',
field=models.CharField(choices=[('NL', 'Netherlands'), ('GB', 'United Kingdom'), ('DE', 'Germany'), ('BE', 'Belgium')], max_length=2),
),
migrations.AlterField(
model_name='session',
name='page_type',
field=models.CharField(choices=[('skip', 'Skip'), ('single', 'Single'), ('multiple', 'Multiple')], default='skip', max_length=15),
),
migrations.AlterField(
model_name='session',
name='recurring_contract',
field=models.CharField(blank=True, choices=[('RECURRING,ONECLICK', 'Recurring and One click (user chooses)'), ('ONECLICK', 'One click'), ('RECURRING', 'Recurring')], max_length=50),
),
migrations.AlterField(
model_name='session',
name='session_type',
field=models.CharField(choices=[('api_recurring', 'API Recurring'), ('hpp_recurring', 'HPP Recurring'), ('hpp_regular', 'HPP Regular')], max_length=25),
),
migrations.AlterField(
model_name='session',
name='shopper_locale',
field=models.CharField(blank=True, choices=[('de_DE', 'German (Germany)'), ('fr_BE', 'French (Belgium)'), ('nl_NL', 'Dutch (Holland)'), ('nl_BE', 'Dutch (Belgium)'), ('en_GB', 'English (United Kingdom)')], default='nl_NL', max_length=5),
),
migrations.AlterField(
model_name='sessionallowedpaymentmethods',
name='method',
field=models.CharField(choices=[('mc', 'Master Card'), ('dotpay', 'Dotpay'), ('bankTransfer_DE', 'German Banktransfer'), ('giropay', 'GiroPay'), ('diners', 'Diners Club'), ('visa', 'Visa'), ('bcmc', 'Bancontact card'), ('bankTransfer_IBAN', 'International Bank Transfer (IBAN)'), ('directdebit_NL', 'Direct Debit (Netherlands)'), ('discover', 'Discover'), ('bankTransfer', 'All banktransfers'), ('maestro', 'Maestro'), ('sepadirectdebit', 'SEPA Direct Debit'), ('amex', 'Amex'), ('directEbanking', 'SofortUberweisung'), ('bankTransfer_NL', 'Dutch Banktransfer'), ('ebanking_FI', 'Finnish E-Banking'), ('ideal', 'iDEAL'), ('card', 'All debit and credit cards'), ('elv', 'ELV'), ('paypal', 'PayPal')], max_length=50),
),
migrations.AlterField(
model_name='sessionblockedpaymentmethods',
name='method',
field=models.CharField(choices=[('mc', 'Master Card'), ('dotpay', 'Dotpay'), ('bankTransfer_DE', 'German Banktransfer'), ('giropay', 'GiroPay'), ('diners', 'Diners Club'), ('visa', 'Visa'), ('bcmc', 'Bancontact card'), ('bankTransfer_IBAN', 'International Bank Transfer (IBAN)'), ('directdebit_NL', 'Direct Debit (Netherlands)'), ('discover', 'Discover'), ('bankTransfer', 'All banktransfers'), ('maestro', 'Maestro'), ('sepadirectdebit', 'SEPA Direct Debit'), ('amex', 'Amex'), ('directEbanking', 'SofortUberweisung'), ('bankTransfer_NL', 'Dutch Banktransfer'), ('ebanking_FI', 'Finnish E-Banking'), ('ideal', 'iDEAL'), ('card', 'All debit and credit cards'), ('elv', 'ELV'), ('paypal', 'PayPal')], max_length=50),
),
]
| lgpl-3.0 | -3,672,662,236,803,558,400 | 77.454545 | 750 | 0.604867 | false | 3.413766 | false | false | false |
gems-uff/labsys | test/admissions/test_models.py | 1 | 5521 | import unittest
from labsys.app import create_app, db
from labsys.admissions.models import (
Patient, Address, Admission, Symptom, ObservedSymptom, Method, Sample,
InfluenzaExam, Vaccine, Hospitalization, UTIHospitalization, ClinicalEvolution,)
from . import mock
class TestModelsRelationships(unittest.TestCase):
def setUp(self):
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
self.client = self.app.test_client(use_cookies=True)
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_patient_address_1to1(self):
patient = mock.patient()
residence = mock.address()
patient.residence = residence
self.assertEqual(patient.residence.patient, patient)
db.session.add(patient)
db.session.commit()
def test_patient_admission_1toM(self):
patient = mock.patient()
admission = mock.admission()
patient.admissions.append(admission)
self.assertEqual(admission.patient, patient)
self.assertEqual(len(patient.admissions.all()), 1)
patient = mock.patient()
admission = mock.admission()
admission.patient = patient
self.assertEqual(admission.patient, patient)
self.assertEqual(len(patient.admissions.all()), 1)
def test_admission_dated_event_1to1(self):
'''
        Where a dated event is a Vaccine, Hospitalization, UTIHospitalization or
        ClinicalEvolution.
That's why their constructor must be the same as MockDatedEvent.
'''
# Setup
admission = mock.admission()
vaccine = mock.dated_event(Vaccine)
# Add to admission
admission.vaccine = vaccine
# Assert they are linked
self.assertEqual(vaccine.admission.vaccine, vaccine)
# Overrides previous vaccine (since it's one-to-one)
vaccine2 = mock.dated_event(Vaccine)
vaccine2.admission = admission
# Assert it was replaced
self.assertNotEqual(admission.vaccine, vaccine)
self.assertEqual(admission.vaccine, vaccine2)
# Ensures commit works
db.session.add(admission)
db.session.commit()
self.assertEqual(vaccine2.id, 1)
self.assertIsNone(vaccine.id)
self.assertEqual(len(Admission.query.all()), 1)
self.assertEqual(len(Vaccine.query.all()), 1)
# Ensures cascade all, delete-orphan works
db.session.delete(admission)
db.session.commit()
self.assertEqual(len(Admission.query.all()), 0)
self.assertEqual(len(Vaccine.query.all()), 0)
def test_admission_symptoms_1toM(self):
# Generate mock models
admission = mock.admission()
obs_symptom0 = ObservedSymptom(
observed=True,
details='obs symptom details',
admission=admission,
symptom=Symptom(name='symptom1'),
)
obs_symptom1 = ObservedSymptom(
observed=False,
details='obs symptom details',
admission=admission,
symptom=Symptom(name='symptom2'),
)
# Assert relationship between is setup
self.assertEqual(len(admission.symptoms), 2)
self.assertEqual(obs_symptom0.admission, obs_symptom1.admission)
self.assertEqual(admission.symptoms[0], obs_symptom0)
self.assertEqual(admission.symptoms[1], obs_symptom1)
# Assert they are correctly commited
db.session.add(admission)
db.session.commit()
# Assert symptoms have the same admission_id
self.assertEqual(obs_symptom0.admission_id, obs_symptom1.admission_id)
# Assert cascade all, delete-orphan works
db.session.delete(admission)
db.session.commit()
self.assertEqual(len(Admission.query.all()), 0)
self.assertEqual(len(ObservedSymptom.query.all()), 0)
    def test_symptom_observations_Mto1(self):
symptom = Symptom(name='symptom')
admission0 = mock.admission()
admission1 = mock.admission()
# id_lvrs_intern must be unique
admission1.id_lvrs_intern += 'lvrs0002'
# Generate mock models
obs_symptom0 = ObservedSymptom(
observed=True,
details='obs symptom details',
admission=admission0,
symptom=symptom
)
obs_symptom1 = ObservedSymptom(
observed=False,
details='obs symptom details',
admission=admission1,
symptom=symptom,
)
# Assert relationship is correctly setup
self.assertEqual(len(symptom.observations), 2)
self.assertEqual(symptom.observations[0], obs_symptom0)
# Collaterally, admission has relation with observed symptom
self.assertEqual(admission0.symptoms[0], obs_symptom0)
# Assert they are correctly commited
db.session.add(symptom)
db.session.commit()
# Assert symptoms have the same admission_id
self.assertEqual(obs_symptom0.symptom_id, symptom.id)
# Assert cascade all, delete-orphan works
db.session.delete(symptom)
db.session.commit()
self.assertEqual(len(Symptom.query.all()), 0)
self.assertEqual(len(ObservedSymptom.query.all()), 0)
# Collaterally, admission does not have the observed symptom
self.assertEqual(len(admission0.symptoms), 0)
| mit | 3,225,911,390,693,592,600 | 37.880282 | 84 | 0.641188 | false | 3.63942 | true | false | false |
fercopa/topography-report | src/libs/Interface/template.py | 1 | 18464 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'plantilla.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(1024, 600)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout_12 = QtGui.QHBoxLayout()
self.horizontalLayout_12.setObjectName(_fromUtf8("horizontalLayout_12"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label = QtGui.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout.addWidget(self.label)
self.plane_of = QtGui.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(10)
self.plane_of.setFont(font)
self.plane_of.setObjectName(_fromUtf8("plane_of"))
self.horizontalLayout.addWidget(self.plane_of)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.horizontalLayout_12.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.label_5 = QtGui.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label_5.setFont(font)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.horizontalLayout_2.addWidget(self.label_5)
self.perito = QtGui.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(10)
self.perito.setFont(font)
self.perito.setObjectName(_fromUtf8("perito"))
self.horizontalLayout_2.addWidget(self.perito)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.horizontalLayout_12.addLayout(self.horizontalLayout_2)
self.verticalLayout.addLayout(self.horizontalLayout_12)
self.horizontalLayout_13 = QtGui.QHBoxLayout()
self.horizontalLayout_13.setObjectName(_fromUtf8("horizontalLayout_13"))
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.label_7 = QtGui.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label_7.setFont(font)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.horizontalLayout_3.addWidget(self.label_7)
self.possession = QtGui.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(10)
self.possession.setFont(font)
self.possession.setObjectName(_fromUtf8("possession"))
self.horizontalLayout_3.addWidget(self.possession)
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem2)
self.horizontalLayout_13.addLayout(self.horizontalLayout_3)
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.label_9 = QtGui.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label_9.setFont(font)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.horizontalLayout_4.addWidget(self.label_9)
self.homeowner = QtGui.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(10)
self.homeowner.setFont(font)
self.homeowner.setObjectName(_fromUtf8("homeowner"))
self.horizontalLayout_4.addWidget(self.homeowner)
spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem3)
self.horizontalLayout_13.addLayout(self.horizontalLayout_4)
self.verticalLayout.addLayout(self.horizontalLayout_13)
self.horizontalLayout_14 = QtGui.QHBoxLayout()
self.horizontalLayout_14.setObjectName(_fromUtf8("horizontalLayout_14"))
self.horizontalLayout_5 = QtGui.QHBoxLayout()
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.label_3 = QtGui.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.horizontalLayout_5.addWidget(self.label_3)
self.location = QtGui.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(10)
self.location.setFont(font)
self.location.setObjectName(_fromUtf8("location"))
self.horizontalLayout_5.addWidget(self.location)
spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem4)
self.horizontalLayout_14.addLayout(self.horizontalLayout_5)
self.horizontalLayout_6 = QtGui.QHBoxLayout()
self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
self.label_11 = QtGui.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label_11.setFont(font)
self.label_11.setObjectName(_fromUtf8("label_11"))
self.horizontalLayout_6.addWidget(self.label_11)
self.date = QtGui.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(10)
self.date.setFont(font)
self.date.setObjectName(_fromUtf8("date"))
self.horizontalLayout_6.addWidget(self.date)
spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem5)
self.horizontalLayout_14.addLayout(self.horizontalLayout_6)
self.verticalLayout.addLayout(self.horizontalLayout_14)
# Table Result and graph
self.figure = plt.figure(figsize=(3, 3))
self.canvas = FigureCanvas(self.figure)
self.toolbarGraph = NavigationToolbar(self.canvas, self.centralwidget)
self.verticalLayoutGraph = QtGui.QVBoxLayout()
self.verticalLayoutGraph.setObjectName(_fromUtf8("verticalGraph"))
self.verticalLayoutGraph.addWidget(self.toolbarGraph)
self.verticalLayoutGraph.addWidget(self.canvas)
self.horizontalLayoutGraph = QtGui.QHBoxLayout()
self.verticalLayoutGraph.setObjectName(_fromUtf8("horizontalGraph"))
self.tableResult = QtGui.QTableWidget(self.centralwidget)
self.tableResult.setObjectName(_fromUtf8("tableResult"))
self.tableResult.setColumnCount(0)
self.tableResult.setRowCount(0)
self.horizontalLayoutGraph.addWidget(self.tableResult)
self.horizontalLayoutGraph.addLayout(self.verticalLayoutGraph)
self.verticalLayout.addLayout(self.horizontalLayoutGraph)
# self.verticalLayout.addWidget(self.tableResult)
self.horizontalLayout_7 = QtGui.QHBoxLayout()
self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7"))
spacerItem6 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_7.addItem(spacerItem6)
self.calcBtn = QtGui.QPushButton(self.centralwidget)
self.calcBtn.setObjectName(_fromUtf8("calcBtn"))
self.horizontalLayout_7.addWidget(self.calcBtn)
spacerItem7 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_7.addItem(spacerItem7)
self.verticalLayout.addLayout(self.horizontalLayout_7)
self.horizontalLayout_11 = QtGui.QHBoxLayout()
self.horizontalLayout_11.setObjectName(_fromUtf8("horizontalLayout_11"))
self.horizontalLayout_8 = QtGui.QHBoxLayout()
self.horizontalLayout_8.setObjectName(_fromUtf8("horizontalLayout_8"))
self.label_2 = QtGui.QLabel(self.centralwidget)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.horizontalLayout_8.addWidget(self.label_2)
self.label_sumAngs = QtGui.QLabel(self.centralwidget)
self.label_sumAngs.setObjectName(_fromUtf8("label_sumAngs"))
self.horizontalLayout_8.addWidget(self.label_sumAngs)
spacerItem8 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_8.addItem(spacerItem8)
self.horizontalLayout_11.addLayout(self.horizontalLayout_8)
self.horizontalLayout_9 = QtGui.QHBoxLayout()
self.horizontalLayout_9.setObjectName(_fromUtf8("horizontalLayout_9"))
self.label_6 = QtGui.QLabel(self.centralwidget)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.horizontalLayout_9.addWidget(self.label_6)
self.label_perim = QtGui.QLabel(self.centralwidget)
self.label_perim.setObjectName(_fromUtf8("label_perim"))
self.horizontalLayout_9.addWidget(self.label_perim)
spacerItem9 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_9.addItem(spacerItem9)
self.horizontalLayout_11.addLayout(self.horizontalLayout_9)
self.horizontalLayout_10 = QtGui.QHBoxLayout()
self.horizontalLayout_10.setObjectName(_fromUtf8("horizontalLayout_10"))
self.label_10 = QtGui.QLabel(self.centralwidget)
self.label_10.setObjectName(_fromUtf8("label_10"))
self.horizontalLayout_10.addWidget(self.label_10)
self.label_area = QtGui.QLabel(self.centralwidget)
self.label_area.setObjectName(_fromUtf8("label_area"))
self.horizontalLayout_10.addWidget(self.label_area)
spacerItem10 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_10.addItem(spacerItem10)
self.horizontalLayout_11.addLayout(self.horizontalLayout_10)
self.verticalLayout.addLayout(self.horizontalLayout_11)
self.tableResult.raise_()
self.label.raise_()
self.plane_of.raise_()
self.label_3.raise_()
self.location.raise_()
self.label_5.raise_()
self.perito.raise_()
self.label_7.raise_()
self.possession.raise_()
self.label_9.raise_()
self.homeowner.raise_()
self.label_11.raise_()
self.date.raise_()
self.label_11.raise_()
self.calcBtn.raise_()
self.perito.raise_()
self.label_3.raise_()
self.label_2.raise_()
self.label_sumAngs.raise_()
self.label_6.raise_()
self.label_perim.raise_()
self.label_10.raise_()
self.label_area.raise_()
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 738, 25))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuArchivo = QtGui.QMenu(self.menubar)
self.menuArchivo.setObjectName(_fromUtf8("menuArchivo"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.toolBar = QtGui.QToolBar(MainWindow)
self.toolBar.setObjectName(_fromUtf8("toolBar"))
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.actionNew = QtGui.QAction(MainWindow)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("icons/new-file.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionNew.setIcon(icon)
self.actionNew.setObjectName(_fromUtf8("actionNew"))
self.actionEdit = QtGui.QAction(MainWindow)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(_fromUtf8("icons/edit-file.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionEdit.setIcon(icon1)
self.actionEdit.setObjectName(_fromUtf8("actionEdit"))
self.actionToPdf = QtGui.QAction(MainWindow)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(_fromUtf8("icons/pdf.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionToPdf.setIcon(icon2)
self.actionToPdf.setObjectName(_fromUtf8("actionToPdf"))
self.actionSave = QtGui.QAction(MainWindow)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(_fromUtf8("icons/floppy-128.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionSave.setIcon(icon3)
self.actionSave.setObjectName(_fromUtf8("actionSave"))
self.actionSave_as = QtGui.QAction(MainWindow)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(_fromUtf8("icons/save_as-128.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionSave_as.setIcon(icon4)
self.actionSave_as.setObjectName(_fromUtf8("actionSave_as"))
self.actionOpen = QtGui.QAction(MainWindow)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(_fromUtf8("icons/open-file.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionOpen.setIcon(icon5)
self.actionOpen.setObjectName(_fromUtf8("actionOpen"))
self.menuArchivo.addAction(self.actionNew)
self.menuArchivo.addAction(self.actionOpen)
self.menuArchivo.addAction(self.actionSave)
self.menuArchivo.addAction(self.actionSave_as)
self.menuArchivo.addAction(self.actionToPdf)
self.menubar.addAction(self.menuArchivo.menuAction())
self.toolBar.addAction(self.actionNew)
self.toolBar.addAction(self.actionOpen)
self.toolBar.addAction(self.actionSave)
self.toolBar.addAction(self.actionEdit)
self.toolBar.addAction(self.actionToPdf)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Planilla de cálculo de coordenadas y superficie", None))
self.label.setText(_translate("MainWindow", "Plano de:", None))
self.plane_of.setText(_translate("MainWindow", "-", None))
self.label_5.setText(_translate("MainWindow", "Perito:", None))
self.perito.setText(_translate("MainWindow", "Ing. COPA, Rodi Alfredo", None))
self.label_7.setText(_translate("MainWindow", "Propiedad:", None))
self.possession.setText(_translate("MainWindow", "-", None))
self.label_9.setText(_translate("MainWindow", "Propietario:", None))
self.homeowner.setText(_translate("MainWindow", "-", None))
self.label_3.setText(_translate("MainWindow", "Ubicación:", None))
self.location.setText(_translate("MainWindow", "-", None))
self.label_11.setText(_translate("MainWindow", "Fecha:", None))
self.date.setText(_translate("MainWindow", "-", None))
self.calcBtn.setText(_translate("MainWindow", "Calcular", None))
self.label_2.setText(_translate("MainWindow", "Suma de ángulos:", None))
self.label_sumAngs.setText(_translate("MainWindow", "-", None))
self.label_6.setText(_translate("MainWindow", "Perímetro:", None))
self.label_perim.setText(_translate("MainWindow", "-", None))
self.label_10.setText(_translate("MainWindow", "Superficie:", None))
self.label_area.setText(_translate("MainWindow", "-", None))
self.menuArchivo.setTitle(_translate("MainWindow", "Archivo", None))
self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar", None))
self.actionNew.setText(_translate("MainWindow", "Nuevo", None))
self.actionNew.setToolTip(_translate("MainWindow", "Nuevo", None))
self.actionNew.setShortcut(_translate("MainWindow", "Ctrl+N", None))
self.actionEdit.setText(_translate("MainWindow", "Editar", None))
self.actionEdit.setToolTip(_translate("MainWindow", "Editar", None))
self.actionToPdf.setText(_translate("MainWindow", "Exportar a pdf", None))
self.actionToPdf.setToolTip(_translate("MainWindow", "Exportar a pdf", None))
self.actionSave.setText(_translate("MainWindow", "Guardar", None))
self.actionSave.setToolTip(_translate("MainWindow", "Guardar", None))
self.actionSave.setShortcut(_translate("MainWindow", "Ctrl+G", None))
self.actionSave_as.setText(_translate("MainWindow", "Guardar como", None))
        self.actionSave_as.setToolTip(_translate("MainWindow", "Guardar archivo como", None))
self.actionSave_as.setShortcut(_translate("MainWindow", "Ctrl+Shift+G", None))
self.actionOpen.setText(_translate("MainWindow", "Abrir", None))
self.actionOpen.setToolTip(_translate("MainWindow", "Abrir archivo", None))
self.actionOpen.setShortcut(_translate("MainWindow", "Ctrl+O", None))
# import icons_rc
| gpl-3.0 | -7,943,037,277,970,612,000 | 52.507246 | 116 | 0.692037 | false | 3.882229 | false | false | false |
rafafigueroa/edison-quad | motor_calibration.py | 1 | 2147 | #!/usr/bin/python
import curses
import shelve
from Adafruit_PWM_Servo_Driver import PWM
import time
import numpy as np
# Initialise the PWM device using the default address
pwm = PWM(0x40, debug=False)
pulseMin = 2000 # Min pulse length out of 4096
pulseLow = 2500
pulseMax = 3047
pulseStop = 0
motorChannel = 0
pwm.setPWMFreq(400) # Set frequency to x Hz
pwm.setPWM(motorChannel, 0, pulseMin) # Set to min (thrtle down)
time.sleep(2) # Wait for motors to be armed
drone_vars = shelve.open('drone_vars')
stdscr = curses.initscr()
curses.cbreak()
stdscr.keypad(1)
stdscr.addstr(0, 10, "Hit 'q' to quit, 'j' to go down and 'k' to go up")
stdscr.refresh()
key = ''
def close_safely():
drone_vars.close()
pwm.setPWM(motorChannel, 0, pulseStop)
curses.endwin()
print('Stopping motor')
cal_index = 3
pwm_pulse = 2200
try:
while key != ord('q'):
key = stdscr.getch()
stdscr.refresh()
cal_rpm = str(cal_index * 1000)
if key == curses.KEY_LEFT:
pwm_pulse = pwm_pulse - 1
stdscr.addstr(cal_index, 20, 'cal rpm: ' + cal_rpm + \
' pulse: ' + str(pwm_pulse))
pwm.setPWM(motorChannel, 0, pwm_pulse)
elif key == curses.KEY_RIGHT:
pwm_pulse = pwm_pulse + 1
stdscr.addstr(cal_index, 20, 'cal rpm: ' + cal_rpm + \
' pulse: ' + str(pwm_pulse))
pwm.setPWM(motorChannel, 0, pwm_pulse)
elif key == ord('j'):
stdscr.addstr(cal_index, 20, 'saved rpm: ' + cal_rpm + \
' pulse: ' + str(pwm_pulse))
drone_vars[cal_rpm] = pwm_pulse
cal_index = cal_index + 1
elif key == ord('k'):
stdscr.addstr(cal_index, 20, 'saved rpm: ' + cal_rpm + \
' pulse: ' + str(pwm_pulse))
drone_vars[cal_rpm] = pwm_pulse
cal_index = cal_index - 1
close_safely()
# Catch an interrupt and set the motor to stop
except KeyboardInterrupt:
close_safely()
| gpl-2.0 | -8,695,745,289,481,325,000 | 26.525641 | 72 | 0.549604 | false | 3.190193 | false | false | false |
WojciechMigda/TCO-PCFStupskiPrize1 | src/cell_patches_kmeans.py | 1 | 8216 | #!/opt/anaconda2/bin/python
# -*- coding: utf-8 -*-
"""
################################################################################
#
# Copyright (c) 2015 Wojciech Migda
# All rights reserved
# Distributed under the terms of the MIT license
#
################################################################################
#
# Filename: cell_patches_kmeans.py
#
# Decription:
# Cell patches from images (with KMeans)
#
# Authors:
# Wojciech Migda
#
################################################################################
#
# History:
# --------
# Date Who Ticket Description
# ---------- --- --------- ------------------------------------------------
# 2015-12-20 wm Initial version
#
################################################################################
"""
from __future__ import print_function
DEBUG = False
__all__ = []
__version__ = 0.1
__date__ = '2015-12-20'
__updated__ = '2015-12-20'
from sys import path as sys_path
sys_path.insert(0, './Pipe')
#from pipe import *
import pipe as P
def pois(im, num_peaks, footprint_radius=2.5, min_dist=8, thr_abs=0.7):
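    # locate up to num_peaks local maxima in every layer of 'im' using a circular
    # footprint of radius footprint_radius, a minimum peak separation of min_dist
    # pixels and an absolute threshold thr_abs; returns a list with one
    # coordinate array per layer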
from skimage.draw import circle
FOOTPRINT_RADIUS = footprint_radius
cxy = circle(4, 4, FOOTPRINT_RADIUS)
from numpy import zeros
cc = zeros((9, 9), dtype=int)
cc[cxy] = 1
from skimage.feature import peak_local_max
MIN_DIST = min_dist
THR_ABS = thr_abs
coordinates = [
peak_local_max(
im[:, :, layer],
min_distance=MIN_DIST,
footprint=cc,
threshold_abs=THR_ABS,
num_peaks=num_peaks) for layer in range(im.shape[2])]
return coordinates
@P.Pipe
def cluster(seq, nclust, window, with_polar):
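    # for every (image, PoIs) pair: keep only PoIs at least window/2 pixels away
    # from the image border, cut a window x window patch around each remaining
    # PoI (optionally converted to polar coordinates), cluster the patches with
    # MiniBatchKMeans and yield the nclust cluster centres as the codebook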
from numpy import where,array
from skimagepipes import cart2polar_
w2 = window / 2
for im, pois in seq:
for layer in range(im.shape[2]):
p = pois[layer]
p = p[where(
(p[:, 0] >= w2) &
(p[:, 0] < (im.shape[0] - w2)) &
(p[:, 1] >= w2) &
(p[:, 1] < (im.shape[1] - w2))
)
]
print(str(p.shape[0]) + " pois")
patches = array([im[cx - w2:cx + w2, cy - w2:cy + w2, layer].ravel() for cx, cy in p])
if with_polar:
patches = array([cart2polar_(im[cx - w2:cx + w2, cy - w2:cy + w2, layer]).ravel() for cx, cy in p])
pass
from sklearn.cluster import KMeans,MiniBatchKMeans
#clf = KMeans(n_clusters=nclust, random_state=1, n_jobs=4)
clf = MiniBatchKMeans(
n_clusters=nclust,
random_state=1,
batch_size=5000)
clf.fit(patches)
VISUALIZE = False
#VISUALIZE = True
if VISUALIZE:
from matplotlib import pyplot as plt
fig, ax = plt.subplots(1, nclust, figsize=(8, 3), sharex=True, sharey=True, subplot_kw={'adjustable':'box-forced'})
for i in range(nclust):
ax[i].imshow(clf.cluster_centers_[i].reshape((window, window)),
interpolation='nearest'
#, cmap=plt.cm.gray
)
ax[i].axis('off')
pass
fig.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9,
bottom=0.02, left=0.02, right=0.98)
plt.show()
pass
yield clf.cluster_centers_
pass
return
def work(in_csv_file, out_csv_file, max_n_pois, npatches, patch_size, with_polar):
from pypipes import as_csv_rows,iformat,loopcount,itime,iattach
from nppipes import itake,iexpand_dims
from skimagepipes import as_image,as_float,equalize_hist,imshow,trim,rgb_as_hed
from tcopipes import clean
features = (
in_csv_file
| as_csv_rows
#| P.skip(5)
#| P.take(4)
| itake(0)
| P.tee
| iformat('../../data/DX/{}-DX.png')
| as_image
| itime
| loopcount
| trim(0.2)
| as_float
| clean
| rgb_as_hed
| itake(0, axis=2)
| iexpand_dims(axis=2)
| equalize_hist
#| imshow("H layer", cmap='gray')
| iattach(pois, max_n_pois)
| cluster(npatches, patch_size, with_polar)
| P.as_list
)
#print(type(next(features, None)))
from numpy import vstack
from numpy import savetxt
#print(vstack(features).shape)
savetxt(out_csv_file, vstack(features), delimiter=',', fmt='%f')
pass
def main(argv=None): # IGNORE:C0111
'''Command line options.'''
from sys import argv as Argv
if argv is None:
argv = Argv
pass
else:
Argv.extend(argv)
pass
from os.path import basename
program_name = basename(Argv[0])
program_version = "v%s" % __version__
program_build_date = str(__updated__)
program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
program_license = '''%s
Created by Wojciech Migda on %s.
Copyright 2015 Wojciech Migda. All rights reserved.
Licensed under the MIT License
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc, str(__date__))
try:
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
from argparse import FileType
from sys import stdout,stdin
# Setup argument parser
parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)
#parser.add_argument("-D", "--data-dir",
# type=str, action='store', dest="data_dir", required=True,
# help="directory with input CSV files, BMP 'train' and 'test' subfolders, and where H5 will be stored")
parser.add_argument("-i", "--in-csv",
action='store', dest="in_csv_file", default=stdin,
type=FileType('r'),
help="input CSV file name")
parser.add_argument("-o", "--out-csv",
action='store', dest="out_csv_file", default=stdout,
type=FileType('w'),
help="output CSV file name")
parser.add_argument("-p", "--patch-size",
type=int, default=16, action='store', dest="patch_size",
help="size of square patch to build the codebook upon, in pixels")
parser.add_argument("-C", "--num-patches",
type=int, default=80, action='store', dest="npatches",
help="number of patches per image")
parser.add_argument("-N", "--max-pois",
type=int, default=5000, action='store', dest="max_n_pois",
help="max number of PoIs to collect (num_peaks of peak_local_max)")
parser.add_argument("-P", "--with-polar",
default=False, action='store_true', dest="with_polar",
help="convert patches to polar coordinates")
# Process arguments
args = parser.parse_args()
for k, v in args.__dict__.items():
print(str(k) + ' => ' + str(v))
pass
work(args.in_csv_file,
args.out_csv_file,
args.max_n_pois,
args.npatches,
args.patch_size,
args.with_polar)
return 0
except KeyboardInterrupt:
### handle keyboard interrupt ###
return 0
except Exception as e:
if DEBUG:
raise(e)
pass
indent = len(program_name) * " "
from sys import stderr
stderr.write(program_name + ": " + repr(e) + "\n")
stderr.write(indent + " for help use --help")
return 2
pass
if __name__ == "__main__":
if DEBUG:
from sys import argv
argv.append("--in-csv=../../data/training.csv")
argv.append("--max-pois=5000")
pass
from sys import exit as Exit
Exit(main())
pass
| mit | -4,096,860,922,282,649,600 | 28.66065 | 131 | 0.516796 | false | 3.721014 | false | false | false |
jachym/pywps-4-demo | processes/buffer.py | 1 | 2616 | import os
import tempfile
__author__ = 'Brauni'
from pywps import Process, LiteralInput, ComplexInput, ComplexOutput, Format, get_format
class Buffer(Process):
def __init__(self):
inputs = [ComplexInput('poly_in', 'Input1',
supported_formats=[get_format('GML')],
max_occurs='2'),
LiteralInput('buffer', 'Buffer', data_type='float')
]
outputs = [ComplexOutput('buff_out', 'Buffered', supported_formats=[get_format('GML')])]
super(Buffer, self).__init__(
self._handler,
identifier='buffer',
version='0.1',
title="Brauni's 1st process",
abstract='This process is the best ever being coded',
profile='',
metadata=['Process', '1st', 'Hilarious'],
inputs=inputs,
outputs=outputs,
store_supported=True,
status_supported=True
)
def _handler(self, request, response):
from osgeo import ogr
inSource = ogr.Open(request.inputs['poly_in'][0].file)
inLayer = inSource.GetLayer()
out = inLayer.GetName()
outPath = os.path.join(tempfile.gettempdir(), out)
# create output file
driver = ogr.GetDriverByName('GML')
outSource = driver.CreateDataSource(outPath, ["XSISCHEMAURI=http://schemas.opengis.net/gml/2.1.2/feature.xsd"])
outLayer = outSource.CreateLayer(out, None, ogr.wkbUnknown)
# for each feature
featureCount = inLayer.GetFeatureCount()
index = 0
import time
while index < featureCount:
# get the geometry
inFeature = inLayer.GetNextFeature()
inGeometry = inFeature.GetGeometryRef()
# make the buffer
buff = inGeometry.Buffer(float(request.inputs['buffer'][0].data))
# create output feature to the file
outFeature = ogr.Feature(feature_def=outLayer.GetLayerDefn())
outFeature.SetGeometryDirectly(buff)
outLayer.CreateFeature(outFeature)
outFeature.Destroy() # makes it crash when using debug
index += 1
time.sleep(1) # making things little bit slower
response.update_status("Calculating buffer for feature %d from %d" % (index + 1, featureCount),
(100 * (index + 1) / featureCount * 1))
response.outputs['buff_out'].data_format = get_format('GML')
response.outputs['buff_out'].file = outPath
return response
| mit | 5,689,942,538,740,844,000 | 34.351351 | 119 | 0.573394 | false | 4.288525 | false | false | false |
pattonwebz/ScheduledTweetBot | search.py | 1 | 1764 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Scheduled Tweet Bot written in Python intended to run on a Raspberry Pi
# (will work anywhere Python and the dependencies are installed though)
# version: 0.9
import tweepy, sys
import dbconnect
import twitterfunctions
from configuration import dbconfig
getKeySecretQuery = ("SELECT CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET FROM Accounts WHERE user = 'default'")
authcnx=dbconnect.dbconnect(dbconfig)
authcursor=dbconnect.dbcursor(authcnx)
gotKeySecretResult = authcursor.execute(getKeySecretQuery)
KeySecretResult = authcursor.fetchall()
for (CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET) in KeySecretResult :
THE_CONSUMER_KEY = CONSUMER_KEY
THE_CONSUMER_SECRET = CONSUMER_SECRET
THE_ACCESS_KEY = ACCESS_KEY
THE_ACCESS_SECRET = ACCESS_SECRET
api = twitterfunctions.authenticatetwitter(THE_CONSUMER_KEY, THE_CONSUMER_SECRET, THE_ACCESS_KEY, THE_ACCESS_SECRET)
def searchtweets(query, getcount):
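    # query the Twitter search API for 'query', take up to 'getcount' results and
    # store (tweet id, user, text, timestamp, search term) rows in the
    # SearchedForTweets table, skipping duplicates via INSERT IGNORE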
results = api.search(q=query,rpp=1,count=getcount)
cnx = dbconnect.dbconnect(dbconfig)
cursor = dbconnect.dbcursor(cnx)
for result in results:
tweet = result.text.encode('utf-8')
user = result.user.screen_name.encode('utf-8')
timesent = result.created_at
tweetid = result.id_str.encode('utf-8')
insertQuery = ('INSERT IGNORE INTO SearchedForTweets '
'(tweetid, username, tweetcontent, timesent, searchterm) '
'VALUES '
'("%s", "%s", %r, "%s", "%s")' % (tweetid, user, tweet, timesent, query))
cursor.execute(insertQuery)
cnx.commit()
print user + " " + tweet
query = str(sys.argv[1])
getcount = int(sys.argv[2])
searchtweets(query, getcount)
| agpl-3.0 | 8,095,361,641,197,468,000 | 31.072727 | 124 | 0.700113 | false | 3.260628 | false | false | false |
racheliel/My-little-business | MyLittleBuisness/api/createPageBus.py | 1 | 1438 | from google.appengine.ext.webapp import template
import webapp2
import json
from models.user import User
from models.page import Page
import time
class createPageBus(webapp2.RequestHandler):
def get(self):
user = User.checkToken(self.request.cookies.get('session'))
if self.request.cookies.get('our_token'): #the cookie that should contain the access token!
user = User.checkToken(self.request.cookies.get('our_token'))
if not user:
self.error(403)
self.response.write('No user - access denied')
return
        # gather the submitted form fields for the new business page
        title = self.request.get('title')
        name = self.request.get('name')
        address = self.request.get('address')
        details = self.request.get('details')
        emailBuss = self.request.get('emailBuss')
        admin = user.key
        if title:
            if Page.checkIfPageExists(title, user.email):
                self.response.write(json.dumps({'status':'exists'}))
                return
            else:
                page = Page.addPage(title, name, address, details, emailBuss, admin)
                time.sleep(0.5)
# page.put()
# time.sleep(0.5)
pages = Page.getAllPages(user)
self.response.write(json.dumps({"status": "OK", "pages": pages}))
app = webapp2.WSGIApplication([
('/api/createPageBus', createPageBus)
], debug=True)
| mit | -7,398,409,846,378,320,000 | 31.681818 | 102 | 0.630737 | false | 3.631313 | false | false | false |
13xforever/webserver | admin/CTK/CTK/Box.py | 5 | 2753 | # CTK: Cherokee Toolkit
#
# Authors:
# Alvaro Lopez Ortega <[email protected]>
#
# Copyright (C) 2009-2014 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
from Widget import Widget
from Container import Container
from consts import HTML_JS_BLOCK
from util import *
HTML = '<div id="%(id)s" %(props)s>%(content)s%(embedded_js)s</div>'
class Box (Container):
"""
Widget for the base DIV element. All arguments are optional.
Arguments:
props: dictionary with properties for the DIV element, such
as {'class': 'test', 'display': 'none'}
content: if provided, it must be a CTK widget
embed_javascript: True|False. Disabled by default. If
enabled, Javascript code associated to the widget will
be rendered as part of the DIV definition instead of
using a separate Javascript block.
Examples:
box1 = CTK.Box()
box2 = CTK.Box({'class': 'test', 'id': 'test-box'},
CTK.RawHTML('This is a test box'))
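       A sketch using the embed_javascript flag described above:
       box3 = CTK.Box({'class': 'test'}, CTK.RawHTML('Inline JS box'),
                      embed_javascript=True)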
"""
def __init__ (self, props={}, content=None, embed_javascript=False):
Container.__init__ (self)
self.props = props.copy()
self.embed_javascript = embed_javascript
# Object ID
if 'id' in self.props:
self.id = self.props.pop('id')
# Initial value
if content:
if isinstance (content, Widget):
self += content
            elif type(content) in (list, tuple):
for o in content:
self += o
else:
raise TypeError, 'Unknown type: "%s"' %(type(content))
def Render (self):
render = Container.Render (self)
if self.embed_javascript and render.js:
js = HTML_JS_BLOCK %(render.js)
render.js = ''
else:
js = ''
props = {'id': self.id,
'props': props_to_str (self.props),
'content': render.html,
'embedded_js': js}
render.html = HTML %(props)
return render
| gpl-2.0 | -3,335,221,350,368,491,000 | 31.77381 | 72 | 0.593898 | false | 4.108955 | false | false | false |
elena/django-starterkit-userauth | project/settings.py | 1 | 3229 | """
Django settings for project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6=7fv&1yl9uuc-i1oi4gm5(*(vh+)@tgo7s3r&pmi@0z8y46ws'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'project.urls'
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# ------------------------------------------
# *** CUSTOM settings
# ------------------------------------------
from os.path import join
DJANGO_APPS = INSTALLED_APPS + (
)
THIRD_PARTY_APPS = (
'south',
'debug_toolbar',
)
LOCAL_APPS = (
'accounts',
)
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
TEMPLATE_DIRS = (
join(BASE_DIR, 'templates'),
)
# Media files
MEDIA_ROOT = join(BASE_DIR, 'public/media')
MEDIA_URL = '/media/'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_ROOT = join(BASE_DIR, 'public/static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
join(BASE_DIR, 'static'),
)
# Sites framework
SITE_ID = 1
# Email
DEFAULT_FROM_EMAIL = '[email protected]'
EMAIL_SUBJECT_PREFIX = '[project] '
SERVER_EMAIL = '[email protected]'
# Authentication
AUTH_USER_MODEL = 'accounts.Profile'
# LOGIN_REDIRECT_URL = '/'
LOGIN_URL = 'accounts:login'
LOGOUT_URL = 'accounts:logout'
if DEBUG:
MIDDLEWARE_CLASSES = MIDDLEWARE_CLASSES + (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
INTERNAL_IPS = ('127.0.0.1',)
INTERCEPT_REDIRECTS = False
| mit | 4,738,676,723,596,372,000 | 19.967532 | 71 | 0.682874 | false | 3.190711 | false | false | false |
arannasousa/pagseguro_xml | build/lib/pagseguro_xml/consultas/v3/__init__.py | 2 | 1268 | # coding=utf-8
# ---------------------------------------------------------------
# Developer:            Arannã Sousa Santos
# Month:                12
# Year:                 2015
# Project:              pagseguro_xml
# e-mail:               [email protected]
# ---------------------------------------------------------------
from .. import v2
class CONST(v2.CONST):
URL_TRANSACAO_DETALHES_V3 = u'https://ws.{ambiente}pagseguro.uol.com.br/v3/transactions/{chave_transacao}?{parametros}'
class ApiPagSeguroConsulta(v2.ApiPagSeguroConsulta):
def __init__(self, ambiente=CONST.AMBIENTE.SANDBOX):
super(ApiPagSeguroConsulta, self).__init__(ambiente=ambiente)
self.__url_transacao_detalhes_v3 = CONST.URL_TRANSACAO_DETALHES_V3
def detalhes_v3(self, email, token, chave_transacao):
from urllib import urlencode
from classes import ClasseTransacaoDetalhes
URL = self.__url_transacao_detalhes_v3.format(
ambiente=CONST.AMBIENTE._resolve_[self.__ambiente],
chave_transacao=chave_transacao,
parametros=urlencode(dict(email=email, token=token))
)
        # the response may contain the reply ClassXML or an error message
return self.__requisicao(URL, ClasseTransacaoDetalhes())
| gpl-2.0 | 4,163,932,741,889,303,000 | 34.166667 | 123 | 0.595577 | false | 3.057971 | false | false | false |
khosrow/hcron | hcron/scripts/hcron_info.py | 1 | 2803 | #! /usr/bin/env python
#
# hcron-info.py
# GPL--start
# This file is part of hcron
# Copyright (C) 2008-2010 Environment/Environnement Canada
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# GPL--end
"""Provide hcron related information.
Simply print the fully qualified host name of the executing machine.
"""
# system imports
import os
import os.path
import pwd
import socket
import sys
# hcron imports
from hcron.constants import *
# constants
PROG_NAME = os.path.basename(sys.argv[0])
def print_usage(PROG_NAME):
print """\
usage: %s --allowed
%s -es
%s --fqdn
Print hcron related information.
Where:
--allowed output "yes" if permitted to use hcron
-es event statuses
--fqdn fully qualified hostname""" % (PROG_NAME, PROG_NAME, PROG_NAME)
def print_allowed():
try:
userName = pwd.getpwuid(os.getuid()).pw_name
userEventListsPath = "%s/%s" % (HCRON_EVENT_LISTS_DUMP_DIR, userName)
if os.path.exists(userEventListsPath):
print "yes"
except Exception, detail:
pass
def print_fqdn():
try:
print socket.getfqdn()
except Exception, detail:
print "Error: Could not determine the fully qualified host name."
def print_user_event_status():
try:
userName = pwd.getpwuid(os.getuid()).pw_name
userEventListsPath = "%s/%s" % (HCRON_EVENT_LISTS_DUMP_DIR, userName)
print open(userEventListsPath, "r").read(),
except Exception, detail:
print "Error: Could not read event status information."
def main():
args = sys.argv[1:]
if len(args) == 0:
print_usage(PROG_NAME)
sys.exit(-1)
while args:
arg = args.pop(0)
if arg in [ "--allowed" ]:
print_allowed()
break
if arg in [ "-es" ]:
print_user_event_status()
break
elif arg in [ "--fqdn" ]:
print_fqdn()
break
elif arg in [ "-h", "--help" ]:
print_usage(PROG_NAME)
break
else:
print_usage(PROG_NAME)
sys.exit(-1)
if __name__ == "__main__":
main()
| gpl-2.0 | -5,623,088,466,478,454,000 | 24.026786 | 83 | 0.624331 | false | 3.697889 | false | false | false |
christinahedges/PyKE | pyke/keptrial.py | 2 | 13904 | from .utils import PyKEArgumentHelpFormatter
import numpy as np
from astropy.io import fits as pyfits
from matplotlib import pyplot as plt
from astropy.stats import LombScargle
from tqdm import tqdm
from . import kepio, kepmsg, kepkey, kepfit, kepfunc, kepstat
__all__ = ['keptrial']
def keptrial(infile, outfile=None, datacol='SAP_FLUX', errcol='SAP_FLUX_ERR',
fmin=0.1, fmax=50, nfreq=10, method='ft', ntrials=1000,
plot=False, overwrite=False, verbose=False,
logfile='keptrial.log'):
"""
keptrial -- Calculate best period and error estimate from time series
``keptrial`` measures the strongest period within the frequency range
:math:`fmin` to :math:`fmax` and estimates 1-:math:`\sigma` error
associated with that period. The error estimate is performed by
constructing ntrial new light curves from the original data provided in
datacol and adjusting each individual data point according to a random
number generator and a shot noise model. While a shot noise model is not
uniformly applicable to all Kepler targets it provides a useful 1st order
estimate for most. A power spectrum is calculated for each light curve
using a user-specified method and the highest peak in each power spectrum
recorded. The distribution of peaks is fit by a normal function, the
centroid is adopted as the best period and 1-standard deviation error is
taken from the standard deviation. A confidence limit is recorded as the
range within which all trial periods fall. While this is termed a '100%'
confidence limit, this only refers to the total number of trials rather
than formal confidence.
The larger the number of **ntrials**, the more robust the result. The
values of nfreq and ntrial have to be chosen carefully to avoid excessive
run times. The values of **fmin**, **fmax** and **nfreq** have to be
chosen carefully in order to provide a sensible measure of period and
error. It is recommended that ``kepperiodogram`` be used to estimate the period and
error before attempting to use ``keptrial``. An exercise of trial and error
will most-likely be needed to choose a permutation of :math:`fmin`,
:math:`fmax` and :math:`nfreq` that resolves the period distribution over a
significant number of frequency bins. If requested, the distribution and
normal fit are plotted. The plot updates after every ntrial iteration,
partly to relieve boredom, and partly for the user to assess whether they
are using the correct permutation of input parameters.
Parameters
----------
infile : str
The name of a MAST standard format FITS file containing a Kepler light
curve within the first data extension.
outfile : str
The name of the output FITS file with a new extension containing the
results of a Monte Carlo period analysis.
datacol : str
The column name containing data stored within extension 1 of infile.
This data is the input data for periodogram. Typically this name is
SAP_FLUX (Simple Aperture Photometry fluxes), but any data column within
extension 1 of the FITS file can be used provided it is coupled to an
error column name using errcol.
errcol : str
The uncertainty data coupled to datacol. Typically this column is
called SAP_FLUX_ERR.
fmin : float [1/day]
The minimum frequency on which each power spectrum will be calculated.
fmax : float [1/day]
The maximum frequency on which each power spectrum will be calculated.
nfreq : int
The number of uniform frequency steps between fmin and fmax over which
the power spectrum will be calculated.
method : str
Choose a method for calculating the power spectrum. Currently, only
'ft', a discrete Fourier transform, is available.
ntrials : int
The number of Monte Carlo trials required before calculating the best
periods, period uncertainty and confidence in the measurement.
plot : bool
Plot the output window function?
overwrite : bool
Overwrite the output file?
verbose : bool
Print informative messages and warnings to the shell and logfile?
logfile : str
Name of the logfile containing error and warning messages.
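    Examples
    --------
    A minimal sketch of a typical call; the light curve file name below is a
    hypothetical placeholder rather than a file distributed with PyKE:
        keptrial('kplr012345678-2009131110544_llc.fits',
                 outfile='keptrial-out.fits', datacol='SAP_FLUX',
                 errcol='SAP_FLUX_ERR', fmin=0.5, fmax=5.0, nfreq=200,
                 ntrials=500, plot=True, overwrite=True)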
"""
if outfile is None:
outfile = infile.split('.')[0] + "-{}.fits".format(__all__[0])
# log the call
hashline = '--------------------------------------------------------------'
kepmsg.log(logfile, hashline, verbose)
call = ('KEPTRIAL -- '
+ ' infile={}'.format(infile)
+ ' outfile={}'.format(outfile)
+ ' datacol={}'.format(datacol)
+ ' errcol={}'.format(errcol)
+ ' fmin={}'.format(fmin)
+ ' fmax={}'.format(fmax)
+ ' nfreq={}'.format(nfreq)
+ ' method={}'.format(method)
+ ' ntrials={}'.format(ntrials)
+ ' plot={}'.format(plot)
+ ' overwrite={}'.format(overwrite)
+ ' verbose={}'.format(verbose)
+ ' logfile={}'.format(logfile))
kepmsg.log(logfile, call+'\n', verbose)
# start time
kepmsg.clock('KEPTRIAL started at', logfile, verbose)
# overwrite output file
if overwrite:
kepio.overwrite(outfile, logfile, verbose)
if kepio.fileexists(outfile):
errmsg = 'ERROR -- KEPTRIAL: {} exists. Use --overwrite'.format(outfile)
kepmsg.err(logfile, errmsg, verbose)
# open input file
instr = pyfits.open(infile, 'readonly')
# fudge non-compliant FITS keywords with no values
instr = kepkey.emptykeys(instr, infile, logfile, verbose)
# input data
try:
barytime = instr[1].data.field('barytime')
except:
barytime = kepio.readfitscol(infile, instr[1].data, 'time', logfile,
verbose)
signal = kepio.readfitscol(infile, instr[1].data, datacol, logfile,
verbose)
err = kepio.readfitscol(infile, instr[1].data, errcol, logfile, verbose)
# remove infinite data from time series
try:
nanclean = instr[1].header['NANCLEAN']
except:
incols = [barytime, signal, err]
[barytime, signal, err] = kepstat.removeinfinlc(signal, incols)
# frequency steps and Monte Carlo iterations
deltaf = (fmax - fmin) / float(nfreq)
freq, pmax, trial = [], [], []
for i in tqdm(range(ntrials)):
trial.append(i + 1)
        # adjust each data point within its error bar so that every Monte Carlo
        # trial analyses a different shot-noise realization of the light curve
        work1 = kepstat.randarray(signal, err)
        # determine Lomb-Scargle power spectrum of the perturbed light curve
        fr = np.arange(fmin, fmax, deltaf)
        power = LombScargle(barytime, work1, work1.max()-work1.min()).power(fr)
# determine peak in FT
pmax.append(-1.0e30)
for j in range(len(fr)):
if (power[j] > pmax[-1]):
pmax[-1] = power[j]
f1 = fr[j]
freq.append(f1)
# plot stop-motion histogram
plt.figure()
plt.clf()
plt.axes([0.08, 0.08, 0.88, 0.89])
plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
n, bins, patches = plt.hist(freq, bins=nfreq, range=[fmin, fmax],
align='mid', rwidth=1, ec='#0000ff',
fc='#ffff00', lw=2)
# fit normal distribution to histogram
x = np.zeros(len(bins))
for j in range(1, len(bins)):
x[j] = (bins[j] + bins[j - 1]) / 2.
pinit = np.array([float(i), freq[-1], deltaf])
n = np.array(n, dtype='float32')
coeffs, errors, covar, sigma, chi2, dof, fit, plotx, ploty = \
kepfit.leastsquares(kepfunc.gauss, pinit, x[1:], n, None,
logfile, verbose)
f = np.arange(fmin, fmax, (fmax - fmin) / 100.)
fit = kepfunc.gauss(coeffs, f)
plt.plot(f, fit, 'r-', linewidth=2)
plt.xlabel(r'Frequency (1/d)', {'color' : 'k'})
plt.ylabel('N', {'color' : 'k'})
plt.xlim(fmin, fmax)
plt.grid()
# render plot
if plot:
plt.show()
# period results
p = 1.0 / coeffs[1]
perr = p * coeffs[2] / coeffs[1]
f1 = fmin; f2 = fmax
gotbin = False
for i in range(len(n)):
if n[i] > 0 and not gotbin:
f1 = bins[i]
gotbin = True
gotbin = False
for i in range(len(n) - 1, 0, -1):
if n[i] > 0 and not gotbin:
f2 = bins[i + 1]
gotbin = True
powave, powstdev = np.mean(pmax), np.std(pmax)
# print result
print(' best period: %.10f days (%.7f min)' % (p, p * 1440.0))
print(' 1-sigma period error: %.10f days (%.7f min)' % (perr, perr * 1440.0))
print(' search range: %.10f - %.10f days ' % (1.0 / fmax, 1.0 / fmin))
print(' 100%% confidence range: %.10f - %.10f days ' % (1.0 / f2, 1.0 / f1))
print(' number of trials: %d' % ntrials)
print(' number of frequency bins: %d' % nfreq)
# history keyword in output file
kepkey.history(call, instr[0], outfile, logfile, verbose)
## write output file
col1 = pyfits.Column(name='TRIAL', format='J', array=trial)
col2 = pyfits.Column(name='FREQUENCY', format='E', unit='1/day',
array=freq)
col3 = pyfits.Column(name='POWER', format='E', array=pmax)
cols = pyfits.ColDefs([col1,col2,col3])
instr.append(pyfits.BinTableHDU.from_columns(cols))
try:
instr[-1].header['EXTNAME'] = ('TRIALS', 'Extension name')
except:
raise KeyError("Could not write EXTNAME to the header of the output"
" file")
try:
instr[-1].header['SEARCHR1'] = (1.0 / fmax,
'Search range lower bound (days)')
except:
raise KeyError("Could not write SEARCHR1 to the header of the output"
" file")
try:
instr[-1].header['SEARCHR2'] = (1.0 / fmin,
'Search range upper bound (days)')
except:
raise KeyError("Could not write SEARCHR2 to the header of the output"
" file")
try:
instr[-1].header['NFREQ'] = (nfreq, 'Number of frequency bins')
except:
raise KeyError("Could not write NFREQ to the header of the output"
" file")
try:
instr[-1].header['PERIOD'] = (p, 'Best period (days)')
except:
raise KeyError("Could not write PERIOD to the header of the output"
" file")
try:
instr[-1].header['PERIODE'] = (perr, '1-sigma period error (days)')
except:
raise KeyError("Could not write PERIODE to the header of the output"
" file")
try:
instr[-1].header['CONFIDR1'] = (1.0 / f2,
'Trial confidence lower bound (days)')
except:
raise KeyError("Could not write CONFIDR1 to the header of the output"
" file")
try:
instr[-1].header['CONFIDR2'] = (1.0 / f1,
'Trial confidence upper bound (days)')
except:
raise KeyError("Could not write CONFIDR2 to the header of the output"
" file")
try:
instr[-1].header['NTRIALS'] = (ntrials, 'Number of trials')
except:
raise KeyError("Could not write NTRIALS to the header of the output"
" file")
print("Writing output file {}...".format(outfile))
instr.writeto(outfile)
# close input file
instr.close()
## end time
kepmsg.clock('KEPTRAIL completed at', logfile, verbose)
def keptrial_main():
import argparse
parser = argparse.ArgumentParser(
description=('Calculate best period and error estimate from'
' Fourier transform'),
formatter_class=PyKEArgumentHelpFormatter)
parser.add_argument('infile', help='Name of input file', type=str)
parser.add_argument('--outfile',
help=('Name of FITS file to output.'
' If None, outfile is infile-keptrial.'),
default=None)
parser.add_argument('--datacol', default='SAP_FLUX',
help='Name of data column', type=str)
parser.add_argument('--errcol', default='SAP_FLUX_ERR',
help='Name of data error column', type=str)
parser.add_argument('--fmin', default=0.1,
help='Minimum search frequency [1/day]', type=float)
parser.add_argument('--fmax', default=50.,
help='Maximum search frequency [1/day]', type=float)
parser.add_argument('--nfreq', default=100,
help='Number of frequency intervals', type=int)
parser.add_argument('--method', default='ft',
help='Frequency search method', type=str,
choices=['ft'])
parser.add_argument('--ntrials', default=1000,
help='Number of search trials', type=int)
parser.add_argument('--plot', action='store_true', help='Plot result?')
parser.add_argument('--overwrite', action='store_true',
help='Overwrite output file?')
parser.add_argument('--verbose', action='store_true',
help='Write to a log file?')
parser.add_argument('--logfile', '-l', help='Name of ascii log file',
default='keptrial.log', type=str)
args = parser.parse_args()
keptrial(args.infile, args.outfile, args.datacol, args.errcol, args.fmin,
args.fmax, args.nfreq, args.method, args.ntrials, args.plot,
args.overwrite, args.verbose, args.logfile)
| mit | -7,235,509,604,369,279,000 | 43.851613 | 87 | 0.596231 | false | 3.892497 | false | false | false |
Zelgadis87/Sick-Beard | lib/guessit/language.py | 8 | 13739 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from guessit import UnicodeMixin, base_text_type, u
from guessit.textutils import find_words
from babelfish import Language
import babelfish
import re
import logging
from guessit.guess import Guess
__all__ = ['Language', 'UNDETERMINED',
'search_language', 'guess_language']
log = logging.getLogger(__name__)
UNDETERMINED = babelfish.Language('und')
SYN = {('und', None): ['unknown', 'inconnu', 'unk', 'un'],
('ell', None): ['gr', 'greek'],
('spa', None): ['esp', 'español'],
('fra', None): ['français', 'vf', 'vff', 'vfi'],
('swe', None): ['se'],
('por', 'BR'): ['po', 'pb', 'pob', 'br', 'brazilian'],
('cat', None): ['català'],
('ces', None): ['cz'],
('ukr', None): ['ua'],
('zho', None): ['cn'],
('jpn', None): ['jp'],
('hrv', None): ['scr'],
('mul', None): ['multi', 'dl'], # http://scenelingo.wordpress.com/2009/03/24/what-does-dl-mean/
}
class GuessitConverter(babelfish.LanguageReverseConverter):
_with_country_regexp = re.compile('(.*)\((.*)\)')
_with_country_regexp2 = re.compile('(.*)-(.*)')
def __init__(self):
self.guessit_exceptions = {}
for (alpha3, country), synlist in SYN.items():
for syn in synlist:
self.guessit_exceptions[syn.lower()] = (alpha3, country, None)
@property
def codes(self):
return (babelfish.language_converters['alpha3b'].codes |
babelfish.language_converters['alpha2'].codes |
babelfish.language_converters['name'].codes |
babelfish.language_converters['opensubtitles'].codes |
babelfish.country_converters['name'].codes |
frozenset(self.guessit_exceptions.keys()))
def convert(self, alpha3, country=None, script=None):
return str(babelfish.Language(alpha3, country, script))
def reverse(self, name):
with_country = (GuessitConverter._with_country_regexp.match(name) or
GuessitConverter._with_country_regexp2.match(name))
if with_country:
lang = babelfish.Language.fromguessit(with_country.group(1).strip())
lang.country = babelfish.Country.fromguessit(with_country.group(2).strip())
return (lang.alpha3, lang.country.alpha2 if lang.country else None, lang.script or None)
# exceptions come first, as they need to override a potential match
# with any of the other guessers
try:
return self.guessit_exceptions[name.lower()]
except KeyError:
pass
for conv in [babelfish.Language,
babelfish.Language.fromalpha3b,
babelfish.Language.fromalpha2,
babelfish.Language.fromname,
babelfish.Language.fromopensubtitles]:
try:
c = conv(name)
return c.alpha3, c.country, c.script
except (ValueError, babelfish.LanguageReverseError):
pass
raise babelfish.LanguageReverseError(name)
babelfish.language_converters['guessit'] = GuessitConverter()
COUNTRIES_SYN = {'ES': ['españa'],
'GB': ['UK'],
'BR': ['brazilian', 'bra'],
# FIXME: this one is a bit of a stretch, not sure how to do
# it properly, though...
'MX': ['Latinoamérica', 'latin america']
}
class GuessitCountryConverter(babelfish.CountryReverseConverter):
def __init__(self):
self.guessit_exceptions = {}
for alpha2, synlist in COUNTRIES_SYN.items():
for syn in synlist:
self.guessit_exceptions[syn.lower()] = alpha2
@property
def codes(self):
return (babelfish.country_converters['name'].codes |
frozenset(babelfish.COUNTRIES.values()) |
frozenset(self.guessit_exceptions.keys()))
def convert(self, alpha2):
return str(babelfish.Country(alpha2))
def reverse(self, name):
# exceptions come first, as they need to override a potential match
# with any of the other guessers
try:
return self.guessit_exceptions[name.lower()]
except KeyError:
pass
try:
return babelfish.Country(name.upper()).alpha2
except ValueError:
pass
for conv in [babelfish.Country.fromname]:
try:
return conv(name).alpha2
except babelfish.CountryReverseError:
pass
raise babelfish.CountryReverseError(name)
babelfish.country_converters['guessit'] = GuessitCountryConverter()
class Language(UnicodeMixin):
"""This class represents a human language.
You can initialize it with pretty much anything, as it knows conversion
from ISO-639 2-letter and 3-letter codes, English and French names.
You can also distinguish languages for specific countries, such as
Portuguese and Brazilian Portuguese.
There are various properties on the language object that give you the
representation of the language for a specific usage, such as .alpha3
to get the ISO 3-letter code, or .opensubtitles to get the OpenSubtitles
language code.
>>> Language('fr')
Language(French)
>>> (Language('eng').english_name) == 'English'
True
>>> (Language('pt(br)').country.name) == 'BRAZIL'
True
>>> (Language('zz', strict=False).english_name) == 'Undetermined'
True
>>> (Language('pt(br)').opensubtitles) == 'pob'
True
"""
def __init__(self, language, country=None, strict=False):
language = u(language.strip().lower())
country = babelfish.Country(country.upper()) if country else None
try:
self.lang = babelfish.Language.fromguessit(language)
# user given country overrides guessed one
if country:
self.lang.country = country
except babelfish.LanguageReverseError:
msg = 'The given string "%s" could not be identified as a language' % language
if strict:
raise ValueError(msg)
log.debug(msg)
self.lang = UNDETERMINED
@property
def country(self):
return self.lang.country
@property
def alpha2(self):
return self.lang.alpha2
@property
def alpha3(self):
return self.lang.alpha3
@property
def alpha3term(self):
return self.lang.alpha3b
@property
def english_name(self):
return self.lang.name
@property
def opensubtitles(self):
return self.lang.opensubtitles
@property
def tmdb(self):
if self.country:
return '%s-%s' % (self.alpha2, self.country.alpha2)
return self.alpha2
def __hash__(self):
return hash(self.lang)
def __eq__(self, other):
if isinstance(other, Language):
# in Guessit, languages are considered equal if their main languages are equal
return self.alpha3 == other.alpha3
if isinstance(other, base_text_type):
try:
return self == Language(other)
except ValueError:
return False
return False
def __ne__(self, other):
return not self == other
def __bool__(self):
return self.lang != UNDETERMINED
__nonzero__ = __bool__
def __unicode__(self):
if self.lang.country:
return '%s(%s)' % (self.english_name, self.country.alpha2)
else:
return self.english_name
def __repr__(self):
if self.lang.country:
return 'Language(%s, country=%s)' % (self.english_name, self.lang.country)
else:
return 'Language(%s)' % self.english_name
# list of common words which could be interpreted as languages, but which
# are far too common to be able to say they represent a language in the
# middle of a string (where they most likely carry their commmon meaning)
LNG_COMMON_WORDS = frozenset([
# english words
'is', 'it', 'am', 'mad', 'men', 'man', 'run', 'sin', 'st', 'to',
'no', 'non', 'war', 'min', 'new', 'car', 'day', 'bad', 'bat', 'fan',
'fry', 'cop', 'zen', 'gay', 'fat', 'one', 'cherokee', 'got', 'an', 'as',
'cat', 'her', 'be', 'hat', 'sun', 'may', 'my', 'mr', 'rum', 'pi',
# french words
'bas', 'de', 'le', 'son', 'ne', 'ca', 'ce', 'et', 'que',
'mal', 'est', 'vol', 'or', 'mon', 'se',
# spanish words
'la', 'el', 'del', 'por', 'mar',
# other
'ind', 'arw', 'ts', 'ii', 'bin', 'chan', 'ss', 'san', 'oss', 'iii',
'vi', 'ben', 'da', 'lt', 'ch',
# new from babelfish
'mkv', 'avi', 'dmd', 'the', 'dis', 'cut', 'stv', 'des', 'dia', 'and',
'cab', 'sub', 'mia', 'rim', 'las', 'une', 'par', 'srt', 'ano', 'toy',
'job', 'gag', 'reel', 'www', 'for', 'ayu', 'csi', 'ren', 'moi', 'sur',
'fer', 'fun', 'two', 'big', 'psy', 'air',
# release groups
'bs' # Bosnian
])
subtitle_prefixes = ['sub', 'subs', 'st', 'vost', 'subforced', 'fansub', 'hardsub']
subtitle_suffixes = ['subforced', 'fansub', 'hardsub']
lang_prefixes = ['true']
def find_possible_languages(string):
"""Find possible languages in the string
:return: list of tuple (property, Language, lang_word, word)
"""
words = find_words(string)
valid_words = []
for word in words:
lang_word = word.lower()
key = 'language'
for prefix in subtitle_prefixes:
if lang_word.startswith(prefix):
lang_word = lang_word[len(prefix):]
key = 'subtitleLanguage'
for suffix in subtitle_suffixes:
if lang_word.endswith(suffix):
                lang_word = lang_word[:-len(suffix)]
key = 'subtitleLanguage'
for prefix in lang_prefixes:
if lang_word.startswith(prefix):
lang_word = lang_word[len(prefix):]
if not lang_word in LNG_COMMON_WORDS:
try:
lang = Language(lang_word)
                # Keep languages with an alpha2 equivalent. Others are probably an uncommon language.
if lang == 'mul' or hasattr(lang, 'alpha2'):
valid_words.append((key, lang, lang_word, word))
except babelfish.Error:
pass
return valid_words
def search_language(string, lang_filter=None):
"""Looks for language patterns, and if found return the language object,
its group span and an associated confidence.
you can specify a list of allowed languages using the lang_filter argument,
as in lang_filter = [ 'fr', 'eng', 'spanish' ]
>>> search_language('movie [en].avi')['language']
Language(English)
>>> search_language('the zen fat cat and the gay mad men got a new fan', lang_filter = ['en', 'fr', 'es'])
"""
if lang_filter:
lang_filter = set(babelfish.Language.fromguessit(lang) for lang in lang_filter)
confidence = 1.0 # for all of them
for prop, language, lang, word in find_possible_languages(string):
pos = string.find(word)
end = pos + len(word)
if lang_filter and language not in lang_filter:
continue
# only allow those languages that have a 2-letter code, those that
# don't are too esoteric and probably false matches
#if language.lang not in lng3_to_lng2:
# continue
# confidence depends on alpha2, alpha3, english name, ...
if len(lang) == 2:
confidence = 0.8
elif len(lang) == 3:
confidence = 0.9
elif prop == 'subtitleLanguage':
confidence = 0.6 # Subtitle prefix found with language
else:
# Note: we could either be really confident that we found a
# language or assume that full language names are too
# common words and lower their confidence accordingly
confidence = 0.3 # going with the low-confidence route here
return Guess({prop: language}, confidence=confidence, input=string, span=(pos, end))
return None
def guess_language(text): # pragma: no cover
"""Guess the language in which a body of text is written.
This uses the external guess-language python module, and will fail and return
Language(Undetermined) if it is not installed.
"""
try:
from guess_language import guessLanguage
return babelfish.Language.fromguessit(guessLanguage(text))
except ImportError:
log.error('Cannot detect the language of the given text body, missing dependency: guess-language')
log.error('Please install it from PyPI, by doing eg: pip install guess-language')
return UNDETERMINED
| gpl-3.0 | 8,712,618,466,136,381,000 | 33.249377 | 110 | 0.59684 | false | 3.783471 | false | false | false |
userdw/RaspberryPi_3_Starter_Kit | 08_Image_Processing/Smoothing_Filter/averaging/averaging.py | 1 | 2619 | import os, cv2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
_projectDirectory = os.path.dirname(__file__)
_imagesDirectory = os.path.join(_projectDirectory, "images")
_images = []
for _root, _dirs, _files in os.walk(_imagesDirectory):
for _file in _files:
if _file.endswith(".jpg"):
_images.append(os.path.join(_imagesDirectory, _file))
_imageIndex = 0
_imageTotal = len(_images)
_img = cv2.imread(_images[_imageIndex], cv2.IMREAD_UNCHANGED)
_img = cv2.cvtColor(_img, cv2.COLOR_BGR2GRAY)
_imgHeight, _imgWidth = _img.shape
_fig = plt.figure("Averaging Smoothing")
_gs = GridSpec(3, 4)
_noiseMean1 = 0.0
_stdDeviation1 = 0.5
_noiseMean2 = 0.0
_stdDeviation2 = 0.5
_noiseMean3 = 0.0
_stdDeviation3 = 0.5
_noiseMean4 = 0.0
_stdDeviation4 = 0.5
_fig1 = plt.subplot(_gs[0:3, 3])
_fig1.set_title("Original")
plt.imshow(_img, cmap = "gray")
_fimg = np.empty((_imgHeight, _imgWidth))
_fimg = cv2.normalize(_img, _fimg, 0.0, 1.0, cv2.NORM_MINMAX, cv2.CV_32F)
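# Add zero-mean Gaussian noise (the four deviations above) to four copies of the
# normalized image, then average the noisy copies: averaging N independent noisy
# realizations reduces the noise standard deviation by roughly sqrt(N) (N = 4 here).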
_fig2 = plt.subplot(_gs[0:1, 0])
_fig2.set_title("Deviation: " + str(_stdDeviation1))
_fnoise1 = np.empty((_imgHeight, _imgWidth))
cv2.randn(_fnoise1, _noiseMean1, _stdDeviation1)
_fresult1 = _fimg + _fnoise1
_fresult1 = cv2.normalize(_fresult1, _fresult1, 0.0, 1.0, cv2.NORM_MINMAX, cv2.CV_32F)
plt.imshow(_fresult1, cmap = "gray")
_fig3 = plt.subplot(_gs[2:3, 0])
_fig3.set_title("Deviation: " + str(_stdDeviation2))
_fnoise2 = np.empty((_imgHeight, _imgWidth))
cv2.randn(_fnoise2, _noiseMean2, _stdDeviation2)
_fresult2 = _fimg + _fnoise2
_fresult2 = cv2.normalize(_fresult2, _fresult2, 0.0, 1.0, cv2.NORM_MINMAX, cv2.CV_32F)
plt.imshow(_fresult2, cmap = "gray")
_fig4 = plt.subplot(_gs[0:1, 1])
_fig4.set_title("Deviation: " + str(_stdDeviation3))
_fnoise3 = np.empty((_imgHeight, _imgWidth))
cv2.randn(_fnoise3, _noiseMean3, _stdDeviation3)
_fresult3 = _fimg + _fnoise3
_fresult3 = cv2.normalize(_fresult3, _fresult3, 0.0, 1.0, cv2.NORM_MINMAX, cv2.CV_32F)
plt.imshow(_fresult3, cmap = "gray")
_fig5 = plt.subplot(_gs[2:3, 1])
_fig5.set_title("Deviation: " + str(_stdDeviation4))
_fnoise4 = np.empty((_imgHeight, _imgWidth))
cv2.randn(_fnoise4, _noiseMean4, _stdDeviation4)
_fresult4 = _fimg + _fnoise4
_fresult4 = cv2.normalize(_fresult4, _fresult4, 0.0, 1.0, cv2.NORM_MINMAX, cv2.CV_32F)
plt.imshow(_fresult4, cmap = "gray")
_fig6 = plt.subplot(_gs[0:3, 2])
_fig6.set_title("Averaging")
_fresult5 = (_fresult1 + _fresult2 + _fresult3 + _fresult4) / 4
plt.imshow(_fresult5, cmap = "gray")
plt.tight_layout()
plt.show()
| mit | 9,209,321,182,256,391,000 | 31.576923 | 86 | 0.668958 | false | 2.299385 | false | false | false |
olivierdalang/QGIS | python/plugins/processing/tests/GdalAlgorithmsRasterTest.py | 3 | 124779 | # -*- coding: utf-8 -*-
"""
***************************************************************************
GdalAlgorithmRasterTest.py
---------------------
Date : January 2016
Copyright : (C) 2016 by Matthias Kuhn
Email : [email protected]
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Matthias Kuhn'
__date__ = 'January 2016'
__copyright__ = '(C) 2016, Matthias Kuhn'
import nose2
import os
import shutil
import tempfile
from qgis.core import (QgsProcessingContext,
QgsProcessingFeedback,
QgsRectangle,
QgsRasterLayer,
QgsProject)
from qgis.testing import (start_app,
unittest)
import AlgorithmsTestBase
from processing.algs.gdal.GdalUtils import GdalUtils
from processing.algs.gdal.AssignProjection import AssignProjection
from processing.algs.gdal.ClipRasterByExtent import ClipRasterByExtent
from processing.algs.gdal.ClipRasterByMask import ClipRasterByMask
from processing.algs.gdal.ColorRelief import ColorRelief
from processing.algs.gdal.GridAverage import GridAverage
from processing.algs.gdal.GridDataMetrics import GridDataMetrics
from processing.algs.gdal.GridInverseDistance import GridInverseDistance
from processing.algs.gdal.GridInverseDistanceNearestNeighbor import GridInverseDistanceNearestNeighbor
from processing.algs.gdal.GridLinear import GridLinear
from processing.algs.gdal.GridNearestNeighbor import GridNearestNeighbor
from processing.algs.gdal.gdal2tiles import gdal2tiles
from processing.algs.gdal.gdalcalc import gdalcalc
from processing.algs.gdal.gdaltindex import gdaltindex
from processing.algs.gdal.contour import contour, contour_polygon
from processing.algs.gdal.gdalinfo import gdalinfo
from processing.algs.gdal.hillshade import hillshade
from processing.algs.gdal.aspect import aspect
from processing.algs.gdal.buildvrt import buildvrt
from processing.algs.gdal.proximity import proximity
from processing.algs.gdal.rasterize import rasterize
from processing.algs.gdal.retile import retile
from processing.algs.gdal.translate import translate
from processing.algs.gdal.warp import warp
from processing.algs.gdal.fillnodata import fillnodata
from processing.algs.gdal.rearrange_bands import rearrange_bands
from processing.algs.gdal.gdaladdo import gdaladdo
from processing.algs.gdal.sieve import sieve
from processing.algs.gdal.gdal2xyz import gdal2xyz
from processing.algs.gdal.polygonize import polygonize
from processing.algs.gdal.pansharp import pansharp
from processing.algs.gdal.merge import merge
from processing.algs.gdal.nearblack import nearblack
from processing.algs.gdal.slope import slope
from processing.algs.gdal.rasterize_over import rasterize_over
from processing.algs.gdal.rasterize_over_fixed_value import rasterize_over_fixed_value
from processing.algs.gdal.viewshed import viewshed
testDataPath = os.path.join(os.path.dirname(__file__), 'testdata')
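# Most tests below only build the console command lines via getConsoleCommands()
# and compare them against expected strings; testRunAssignProjection and the
# yaml-driven tests inherited from AlgorithmsTestBase actually execute the algorithms.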
class TestGdalRasterAlgorithms(unittest.TestCase, AlgorithmsTestBase.AlgorithmsTest):
@classmethod
def setUpClass(cls):
start_app()
from processing.core.Processing import Processing
Processing.initialize()
cls.cleanup_paths = []
@classmethod
def tearDownClass(cls):
for path in cls.cleanup_paths:
shutil.rmtree(path)
def test_definition_file(self):
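        # yaml file consumed by AlgorithmsTestBase.AlgorithmsTest to load the
        # data-driven algorithm tests from the testdata folder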
return 'gdal_algorithm_raster_tests.yaml'
def testAssignProjection(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = AssignProjection()
alg.initAlgorithm()
# with target srs
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'CRS': 'EPSG:3111'}, context, feedback),
['gdal_edit.py',
'-a_srs EPSG:3111 ' +
source])
# with target using proj string
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'CRS': custom_crs}, context, feedback),
['gdal_edit.py',
'-a_srs EPSG:20936 ' +
source])
# with target using custom projection
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'CRS': custom_crs}, context, feedback),
['gdal_edit.py',
'-a_srs "+proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs" ' +
source])
# with non-EPSG crs code
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'CRS': 'POSTGIS:3111'}, context, feedback),
['gdal_edit.py',
'-a_srs EPSG:3111 ' +
source])
@unittest.skipIf(os.environ.get('TRAVIS', '') == 'true',
'gdal_edit.py: not found')
def testRunAssignProjection(self):
# Check that assign projection updates QgsRasterLayer info
# GDAL Assign Projection is based on gdal_edit.py
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = AssignProjection()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
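            # work on a copy of the DEM, since gdal_edit.py rewrites the file in place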
fake_dem = os.path.join(outdir, 'dem-fake-crs.tif')
shutil.copy(source, fake_dem)
self.assertTrue(os.path.exists(fake_dem))
rlayer = QgsRasterLayer(fake_dem, "Fake dem")
self.assertTrue(rlayer.isValid())
self.assertEqual(rlayer.crs().authid(), 'EPSG:4326')
project = QgsProject()
project.setFileName(os.path.join(outdir, 'dem-fake-crs.qgs'))
project.addMapLayer(rlayer)
self.assertEqual(project.count(), 1)
context.setProject(project)
alg.run({'INPUT': fake_dem, 'CRS': 'EPSG:3111'},
context, feedback)
self.assertEqual(rlayer.crs().authid(), 'EPSG:3111')
def testGdalTranslate(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
translate_alg = translate()
translate_alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# without NODATA value
self.assertEqual(
translate_alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with None NODATA value
self.assertEqual(
translate_alg.getConsoleCommands({'INPUT': source,
'NODATA': None,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
translate_alg.getConsoleCommands({'INPUT': source,
'NODATA': 9999,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-a_nodata 9999.0 ' +
'-of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
translate_alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-a_nodata 0.0 ' +
'-of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value and custom data type
self.assertEqual(
translate_alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'DATA_TYPE': 6,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-a_nodata 0.0 ' +
'-ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with target srs
self.assertEqual(
translate_alg.getConsoleCommands({'INPUT': source,
'TARGET_CRS': 'EPSG:3111',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-a_srs EPSG:3111 ' +
'-of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with target using proj string
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
self.assertEqual(
translate_alg.getConsoleCommands({'INPUT': source,
'TARGET_CRS': custom_crs,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-a_srs EPSG:20936 ' +
'-of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with target using custom projection
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
self.assertEqual(
translate_alg.getConsoleCommands({'INPUT': source,
'TARGET_CRS': custom_crs,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-a_srs "+proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs" ' +
'-of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with non-EPSG crs code
self.assertEqual(
translate_alg.getConsoleCommands({'INPUT': source,
'TARGET_CRS': 'POSTGIS:3111',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-a_srs EPSG:3111 ' +
'-of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with copy subdatasets
self.assertEqual(
translate_alg.getConsoleCommands({'INPUT': source,
'COPY_SUBDATASETS': True,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdal_translate',
'-sds ' +
'-of GTiff ' +
source + ' ' +
outdir + '/check.tif'])
# additional parameters
self.assertEqual(
translate_alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-strict -unscale -epo',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-of JPEG -strict -unscale -epo ' +
source + ' ' +
outdir + '/check.jpg'])
def testClipRasterByExtent(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = ClipRasterByExtent()
alg.initAlgorithm()
extent = QgsRectangle(1, 2, 3, 4)
with tempfile.TemporaryDirectory() as outdir:
# with no NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTENT': extent,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-projwin 0.0 0.0 0.0 0.0 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTENT': extent,
'NODATA': 9999,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-projwin 0.0 0.0 0.0 0.0 -a_nodata 9999.0 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTENT': extent,
'NODATA': 0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-projwin 0.0 0.0 0.0 0.0 -a_nodata 0.0 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value and custom data type
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTENT': extent,
'NODATA': 0,
'DATA_TYPE': 6,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-projwin 0.0 0.0 0.0 0.0 -a_nodata 0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with creation options
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTENT': extent,
'OPTIONS': 'COMPRESS=DEFLATE|PREDICTOR=2|ZLEVEL=9',
'DATA_TYPE': 0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-projwin 0.0 0.0 0.0 0.0 -of JPEG -co COMPRESS=DEFLATE -co PREDICTOR=2 -co ZLEVEL=9 ' +
source + ' ' +
outdir + '/check.jpg'])
# with additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTENT': extent,
'EXTRA': '-s_srs EPSG:4326 -tps -tr 0.1 0.1',
'DATA_TYPE': 0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-projwin 0.0 0.0 0.0 0.0 -of JPEG -s_srs EPSG:4326 -tps -tr 0.1 0.1 ' +
source + ' ' +
outdir + '/check.jpg'])
def testClipRasterByMask(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
mask = os.path.join(testDataPath, 'polys.gml')
alg = ClipRasterByMask()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# with no NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MASK': mask,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-of JPEG -cutline ' +
mask + ' -cl polys2 -crop_to_cutline ' + source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MASK': mask,
'NODATA': 9999,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-of JPEG -cutline ' +
mask + ' -cl polys2 -crop_to_cutline -dstnodata 9999.0 ' + source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MASK': mask,
'NODATA': 0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-of JPEG -cutline ' +
mask + ' -cl polys2 -crop_to_cutline -dstnodata 0.0 ' + source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value and custom data type
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MASK': mask,
'NODATA': 0,
'DATA_TYPE': 6,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-ot Float32 -of JPEG -cutline ' +
mask + ' -cl polys2 -crop_to_cutline -dstnodata 0.0 ' + source + ' ' +
outdir + '/check.jpg'])
# with creation options
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MASK': mask,
'OPTIONS': 'COMPRESS=DEFLATE|PREDICTOR=2|ZLEVEL=9',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-of JPEG -cutline ' +
mask + ' -cl polys2 -crop_to_cutline -co COMPRESS=DEFLATE -co PREDICTOR=2 -co ZLEVEL=9 ' +
source + ' ' +
outdir + '/check.jpg'])
            # with multithreading and additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MASK': mask,
'MULTITHREADING': True,
'EXTRA': '-nosrcalpha -wm 2048 -nomd',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-of JPEG -cutline ' +
mask + ' -cl polys2 -crop_to_cutline -multi -nosrcalpha -wm 2048 -nomd ' +
source + ' ' +
outdir + '/check.jpg'])
def testContourPolygon(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = contour_polygon()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'FIELD_NAME_MIN': 'min',
'FIELD_NAME_MAX': 'max',
'INTERVAL': 5,
'OUTPUT': outdir + '/check.shp'}, context, feedback),
['gdal_contour',
'-p -amax max -amin min -b 1 -i 5.0 -f "ESRI Shapefile" ' +
source + ' ' +
outdir + '/check.shp'])
def testContour(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = contour()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# with no NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'FIELD_NAME': 'elev',
'INTERVAL': 5,
'OUTPUT': outdir + '/check.shp'}, context, feedback),
['gdal_contour',
'-b 1 -a elev -i 5.0 -f "ESRI Shapefile" ' +
source + ' ' +
outdir + '/check.shp'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'FIELD_NAME': 'elev',
'INTERVAL': 5,
'NODATA': 9999,
'OUTPUT': outdir + '/check.shp'}, context, feedback),
['gdal_contour',
'-b 1 -a elev -i 5.0 -snodata 9999.0 -f "ESRI Shapefile" ' +
source + ' ' +
outdir + '/check.shp'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'FIELD_NAME': 'elev',
'INTERVAL': 5,
'NODATA': 0,
'OUTPUT': outdir + '/check.gpkg'}, context, feedback),
['gdal_contour',
'-b 1 -a elev -i 5.0 -snodata 0.0 -f "GPKG" ' +
source + ' ' +
outdir + '/check.gpkg'])
# with CREATE_3D
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'CREATE_3D': True,
'OUTPUT': outdir + '/check.shp'}, context, feedback),
['gdal_contour',
'-b 1 -a ELEV -i 10.0 -3d -f "ESRI Shapefile" ' +
source + ' ' +
outdir + '/check.shp'])
# with IGNORE_NODATA and OFFSET
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'IGNORE_NODATA': True,
'OFFSET': 100,
'OUTPUT': outdir + '/check.shp'}, context, feedback),
['gdal_contour',
'-b 1 -a ELEV -i 10.0 -inodata -off 100.0 -f "ESRI Shapefile" ' +
source + ' ' +
outdir + '/check.shp'])
# with additional command line parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'EXTRA': '-e 3 -amin MIN_H',
'OUTPUT': outdir + '/check.shp'}, context, feedback),
['gdal_contour',
'-b 1 -a ELEV -i 10.0 -f "ESRI Shapefile" -e 3 -amin MIN_H ' +
source + ' ' +
outdir + '/check.shp'])
# obsolete OPTIONS param
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'OPTIONS': '-fl 100 125 150 200',
'OUTPUT': outdir + '/check.shp'}, context, feedback),
['gdal_contour',
'-b 1 -a ELEV -i 10.0 -f "ESRI Shapefile" -fl 100 125 150 200 ' +
source + ' ' +
outdir + '/check.shp'])
def testGdal2Tiles(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = gdal2tiles()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# with no NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/'}, context, feedback),
['gdal2tiles.py',
'-p mercator -w all -r average ' +
source + ' ' +
outdir + '/'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': -9999,
'OUTPUT': outdir + '/'}, context, feedback),
['gdal2tiles.py',
'-p mercator -w all -r average -a -9999.0 ' +
source + ' ' +
outdir + '/'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'OUTPUT': outdir + '/'}, context, feedback),
['gdal2tiles.py',
'-p mercator -w all -r average -a 0.0 ' +
source + ' ' +
outdir + '/'])
# with input srs
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': 'EPSG:3111',
'OUTPUT': outdir + '/'}, context, feedback),
['gdal2tiles.py',
'-p mercator -w all -r average -s EPSG:3111 ' +
source + ' ' +
outdir + '/'])
# with target using proj string
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': custom_crs,
'OUTPUT': outdir + '/'}, context, feedback),
['gdal2tiles.py',
'-p mercator -w all -r average -s EPSG:20936 ' +
source + ' ' +
outdir + '/'])
# with target using custom projection
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': custom_crs,
'OUTPUT': outdir + '/'}, context, feedback),
['gdal2tiles.py',
'-p mercator -w all -r average -s "+proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs" ' +
source + ' ' +
outdir + '/'])
# with non-EPSG crs code
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': 'POSTGIS:3111',
'OUTPUT': outdir + '/'}, context, feedback),
['gdal2tiles.py',
'-p mercator -w all -r average -s EPSG:3111 ' +
source + ' ' +
outdir + '/'])
def testGdalCalc(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = gdalcalc()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
output = outdir + '/check.jpg'
# default execution
formula = 'A*2' # default formula
self.assertEqual(
alg.getConsoleCommands({'INPUT_A': source,
'BAND_A': 1,
'FORMULA': formula,
'OUTPUT': output}, context, feedback),
['gdal_calc.py',
'--calc "{}" --format JPEG --type Float32 -A {} --A_band 1 --outfile {}'.format(formula, source, output)])
            # check that the formula is not escaped and is passed through unchanged
formula = 'A * 2' # <--- add spaces in the formula
self.assertEqual(
alg.getConsoleCommands({'INPUT_A': source,
'BAND_A': 1,
'FORMULA': formula,
'OUTPUT': output}, context, feedback),
['gdal_calc.py',
'--calc "{}" --format JPEG --type Float32 -A {} --A_band 1 --outfile {}'.format(formula, source, output)])
# additional creation options
formula = 'A*2'
self.assertEqual(
alg.getConsoleCommands({'INPUT_A': source,
'BAND_A': 1,
'FORMULA': formula,
'OPTIONS': 'COMPRESS=JPEG|JPEG_QUALITY=75',
'OUTPUT': output}, context, feedback),
['gdal_calc.py',
'--calc "{}" --format JPEG --type Float32 -A {} --A_band 1 --co COMPRESS=JPEG --co JPEG_QUALITY=75 --outfile {}'.format(formula, source, output)])
# additional parameters
formula = 'A*2'
self.assertEqual(
alg.getConsoleCommands({'INPUT_A': source,
'BAND_A': 1,
'FORMULA': formula,
'EXTRA': '--debug --quiet',
'OUTPUT': output}, context, feedback),
['gdal_calc.py',
'--calc "{}" --format JPEG --type Float32 -A {} --A_band 1 --debug --quiet --outfile {}'.format(formula, source, output)])
def testGdalInfo(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = gdalinfo()
alg.initAlgorithm()
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MIN_MAX': False,
'NOGCP': False,
'NO_METADATA': False,
'STATS': False}, context, feedback),
['gdalinfo',
source])
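        # from here on the input path contains spaces, so it must be quoted in the command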
source = os.path.join(testDataPath, 'raster with spaces.tif')
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MIN_MAX': False,
'NOGCP': False,
'NO_METADATA': False,
'STATS': False}, context, feedback),
['gdalinfo',
'"' + source + '"'])
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MIN_MAX': True,
'NOGCP': False,
'NO_METADATA': False,
'STATS': False}, context, feedback),
['gdalinfo',
'-mm "' + source + '"'])
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MIN_MAX': False,
'NOGCP': True,
'NO_METADATA': False,
'STATS': False}, context, feedback),
['gdalinfo',
'-nogcp "' + source + '"'])
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MIN_MAX': False,
'NOGCP': False,
'NO_METADATA': True,
'STATS': False}, context, feedback),
['gdalinfo',
'-nomd "' + source + '"'])
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MIN_MAX': False,
'NOGCP': False,
'NO_METADATA': False,
'STATS': True}, context, feedback),
['gdalinfo',
'-stats "' + source + '"'])
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MIN_MAX': False,
'NOGCP': False,
'NO_METADATA': False,
'STATS': False,
'EXTRA': '-proj4 -listmdd -checksum'}, context, feedback),
['gdalinfo',
'-proj4 -listmdd -checksum "' + source + '"'])
def testGdalTindex(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = gdaltindex()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
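            # the layer list is written to a temporary --optfile, so only the stable
            # parts of the generated command are checked with assertIn()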
commands = alg.getConsoleCommands({'LAYERS': [source],
'OUTPUT': outdir + '/test.shp'}, context, feedback)
self.assertEqual(len(commands), 2)
self.assertEqual(commands[0], 'gdaltindex')
self.assertIn('-tileindex location -f "ESRI Shapefile" ' + outdir + '/test.shp', commands[1])
self.assertIn('--optfile ', commands[1])
# with input srs
commands = alg.getConsoleCommands({'LAYERS': [source],
'TARGET_CRS': 'EPSG:3111',
'OUTPUT': outdir + '/test.shp'}, context, feedback)
self.assertEqual(len(commands), 2)
self.assertEqual(commands[0], 'gdaltindex')
self.assertIn('-tileindex location -t_srs EPSG:3111 -f "ESRI Shapefile" ' + outdir + '/test.shp', commands[1])
self.assertIn('--optfile ', commands[1])
# with target using proj string
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
commands = alg.getConsoleCommands({'LAYERS': [source],
'TARGET_CRS': custom_crs,
'OUTPUT': outdir + '/test.shp'}, context, feedback)
self.assertEqual(len(commands), 2)
self.assertEqual(commands[0], 'gdaltindex')
self.assertIn('-tileindex location -t_srs EPSG:20936 -f "ESRI Shapefile" ' + outdir + '/test.shp', commands[1])
self.assertIn('--optfile ', commands[1])
# with target using custom projection
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
commands = alg.getConsoleCommands({'LAYERS': [source],
'TARGET_CRS': custom_crs,
'OUTPUT': outdir + '/test.shp'}, context, feedback)
self.assertEqual(len(commands), 2)
self.assertEqual(commands[0], 'gdaltindex')
self.assertIn('-tileindex location -t_srs "+proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs" -f "ESRI Shapefile" ' + outdir + '/test.shp', commands[1])
self.assertIn('--optfile ', commands[1])
# with non-EPSG crs code
commands = alg.getConsoleCommands({'LAYERS': [source],
'TARGET_CRS': 'POSTGIS:3111',
'OUTPUT': outdir + '/test.shp'}, context, feedback)
self.assertEqual(len(commands), 2)
self.assertEqual(commands[0], 'gdaltindex')
self.assertIn(
'-tileindex location -t_srs EPSG:3111 -f "ESRI Shapefile" ' + outdir + '/test.shp',
commands[1])
self.assertIn('--optfile ', commands[1])
def testGridAverage(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'points.gml')
alg = GridAverage()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# with no NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a average:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 9999,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a average:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=9999.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a average:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-z_multiply 1.5 -outsize 1754 1394',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a average:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 -ot Float32 -of JPEG -z_multiply 1.5 -outsize 1754 1394 ' +
source + ' ' +
outdir + '/check.jpg'])
def testGridDataMetrics(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'points.gml')
alg = GridDataMetrics()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# without NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a minimum:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 9999,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a minimum:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=9999.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a minimum:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
            # non-default data metric (average distance)
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'METRIC': 4,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a average_distance:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-z_multiply 1.5 -outsize 1754 1394',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdal_grid',
'-l points -a minimum:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 ' +
'-ot Float32 -of GTiff -z_multiply 1.5 -outsize 1754 1394 ' +
source + ' ' +
outdir + '/check.tif'])
def testGridInverseDistance(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'points.gml')
alg = GridInverseDistance()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# without NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a invdist:power=2.0:smothing=0.0:radius1=0.0:radius2=0.0:angle=0.0:max_points=0:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 9999,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a invdist:power=2.0:smothing=0.0:radius1=0.0:radius2=0.0:angle=0.0:max_points=0:min_points=0:nodata=9999.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a invdist:power=2.0:smothing=0.0:radius1=0.0:radius2=0.0:angle=0.0:max_points=0:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-z_multiply 1.5 -outsize 1754 1394',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdal_grid',
'-l points -a invdist:power=2.0:smothing=0.0:radius1=0.0:radius2=0.0:angle=0.0:max_points=0:min_points=0:nodata=0.0 ' +
'-ot Float32 -of GTiff -z_multiply 1.5 -outsize 1754 1394 ' +
source + ' ' +
outdir + '/check.tif'])
def testGridInverseDistanceNearestNeighbour(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'points.gml')
alg = GridInverseDistanceNearestNeighbor()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# without NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a invdistnn:power=2.0:smothing=0.0:radius=1.0:max_points=12:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 9999,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a invdistnn:power=2.0:smothing=0.0:radius=1.0:max_points=12:min_points=0:nodata=9999.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a invdistnn:power=2.0:smothing=0.0:radius=1.0:max_points=12:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-z_multiply 1.5 -outsize 1754 1394',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdal_grid',
'-l points -a invdistnn:power=2.0:smothing=0.0:radius=1.0:max_points=12:min_points=0:nodata=0.0 ' +
'-ot Float32 -of GTiff -z_multiply 1.5 -outsize 1754 1394 ' +
source + ' ' +
outdir + '/check.tif'])
def testGridLinear(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'points.gml')
alg = GridLinear()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# without NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a linear:radius=-1.0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 9999,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a linear:radius=-1.0:nodata=9999.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a linear:radius=-1.0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-z_multiply 1.5 -outsize 1754 1394',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdal_grid',
'-l points -a linear:radius=-1.0:nodata=0.0 -ot Float32 -of GTiff ' +
'-z_multiply 1.5 -outsize 1754 1394 ' +
source + ' ' +
outdir + '/check.tif'])
def testGridNearestNeighbour(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'points.gml')
alg = GridNearestNeighbor()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# without NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a nearest:radius1=0.0:radius2=0.0:angle=0.0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 9999,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a nearest:radius1=0.0:radius2=0.0:angle=0.0:nodata=9999.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a nearest:radius1=0.0:radius2=0.0:angle=0.0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-z_multiply 1.5 -outsize 1754 1394',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdal_grid',
'-l points -a nearest:radius1=0.0:radius2=0.0:angle=0.0:nodata=0.0 -ot Float32 -of GTiff ' +
'-z_multiply 1.5 -outsize 1754 1394 ' +
source + ' ' +
outdir + '/check.tif'])
def testHillshade(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = hillshade()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'Z_FACTOR': 5,
'SCALE': 2,
'AZIMUTH': 90,
'ALTITUDE': 20,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'hillshade ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -z 5.0 -s 2.0 -az 90.0 -alt 20.0'])
            # paths with spaces
source_with_space = os.path.join(testDataPath, 'raster with spaces.tif')
self.assertEqual(
alg.getConsoleCommands({'INPUT': source_with_space,
'BAND': 1,
'Z_FACTOR': 5,
'SCALE': 2,
'AZIMUTH': 90,
'ALTITUDE': 20,
'OUTPUT': outdir + '/check out.tif'}, context, feedback),
['gdaldem',
'hillshade ' +
'"' + source_with_space + '" ' +
'"{}/check out.tif" -of GTiff -b 1 -z 5.0 -s 2.0 -az 90.0 -alt 20.0'.format(outdir)])
# compute edges
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'Z_FACTOR': 5,
'SCALE': 2,
'AZIMUTH': 90,
'ALTITUDE': 20,
'COMPUTE_EDGES': True,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'hillshade ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -z 5.0 -s 2.0 -az 90.0 -alt 20.0 -compute_edges'])
# with ZEVENBERGEN
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'Z_FACTOR': 5,
'SCALE': 2,
'AZIMUTH': 90,
'ALTITUDE': 20,
'ZEVENBERGEN': True,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'hillshade ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -z 5.0 -s 2.0 -az 90.0 -alt 20.0 -alg ZevenbergenThorne'])
# with COMBINED
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'Z_FACTOR': 5,
'SCALE': 2,
'AZIMUTH': 90,
'ALTITUDE': 20,
'COMBINED': True,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'hillshade ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -z 5.0 -s 2.0 -az 90.0 -alt 20.0 -combined'])
# with multidirectional - "az" argument is not allowed!
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'Z_FACTOR': 5,
'SCALE': 2,
'AZIMUTH': 90,
'ALTITUDE': 20,
'MULTIDIRECTIONAL': True,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'hillshade ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -z 5.0 -s 2.0 -alt 20.0 -multidirectional'])
# defaults with additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'EXTRA': '-q',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'hillshade ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -z 1.0 -s 1.0 -az 315.0 -alt 45.0 -q'])
def testAspect(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = aspect()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'TRIG_ANGLE': False,
'ZERO_FLAT': False,
'COMPUTE_EDGES': False,
'ZEVENBERGEN': False,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'aspect ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1'])
            # paths with spaces
source_with_space = os.path.join(testDataPath, 'raster with spaces.tif')
self.assertEqual(
alg.getConsoleCommands({'INPUT': source_with_space,
'BAND': 1,
'TRIG_ANGLE': False,
'ZERO_FLAT': False,
'COMPUTE_EDGES': False,
'ZEVENBERGEN': False,
'OUTPUT': outdir + '/check out.tif'}, context, feedback),
['gdaldem',
'aspect ' +
'"' + source_with_space + '" ' +
'"{}/check out.tif" -of GTiff -b 1'.format(outdir)])
# compute edges
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'TRIG_ANGLE': False,
'ZERO_FLAT': False,
'COMPUTE_EDGES': True,
'ZEVENBERGEN': False,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'aspect ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -compute_edges'])
# with ZEVENBERGEN
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'TRIG_ANGLE': False,
'ZERO_FLAT': False,
'COMPUTE_EDGES': False,
'ZEVENBERGEN': True,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'aspect ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -alg ZevenbergenThorne'])
# with ZERO_FLAT
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'TRIG_ANGLE': False,
'ZERO_FLAT': True,
'COMPUTE_EDGES': False,
'ZEVENBERGEN': False,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'aspect ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -zero_for_flat'])
# with TRIG_ANGLE
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'TRIG_ANGLE': True,
'ZERO_FLAT': False,
'COMPUTE_EDGES': False,
'ZEVENBERGEN': False,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'aspect ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -trigonometric'])
# with creation options
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'TRIG_ANGLE': False,
'ZERO_FLAT': False,
'COMPUTE_EDGES': False,
'ZEVENBERGEN': False,
'OPTIONS': 'COMPRESS=JPEG|JPEG_QUALITY=75',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'aspect ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -co COMPRESS=JPEG -co JPEG_QUALITY=75'])
# with additional parameter
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'TRIG_ANGLE': False,
'ZERO_FLAT': False,
'COMPUTE_EDGES': False,
'ZEVENBERGEN': False,
'EXTRA': '-q',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'aspect ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -q'])
def testSlope(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = slope()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'slope ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -s 1.0'])
# compute edges
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'COMPUTE_EDGES': True,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'slope ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -s 1.0 -compute_edges'])
# with ZEVENBERGEN
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'ZEVENBERGEN': True,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'slope ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -s 1.0 -alg ZevenbergenThorne'])
            # custom scale (ratio of vertical units to horizontal)
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'SCALE': 2.0,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'slope ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -s 2.0'])
# with creation options
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'OPTIONS': 'COMPRESS=JPEG|JPEG_QUALITY=75',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'slope ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -s 1.0 -co COMPRESS=JPEG -co JPEG_QUALITY=75'])
# with additional parameter
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'EXTRA': '-q',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdaldem',
'slope ' +
source + ' ' +
outdir + '/check.jpg -of JPEG -b 1 -s 1.0 -q'])
def testColorRelief(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
colorTable = os.path.join(testDataPath, 'colors.txt')
alg = ColorRelief()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'COLOR_TABLE': colorTable,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'color-relief ' +
source + ' ' +
colorTable + ' ' +
outdir + '/check.tif -of GTiff -b 1'])
            # paths with spaces
source_with_space = os.path.join(testDataPath, 'raster with spaces.tif')
self.assertEqual(
alg.getConsoleCommands({'INPUT': source_with_space,
'BAND': 1,
'COLOR_TABLE': colorTable,
'OUTPUT': outdir + '/check out.tif'}, context, feedback),
['gdaldem',
'color-relief ' +
'"' + source_with_space + '" ' +
colorTable + ' ' +
'"{}/check out.tif" -of GTiff -b 1'.format(outdir)])
# compute edges
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'COLOR_TABLE': colorTable,
'COMPUTE_EDGES': True,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'color-relief ' +
source + ' ' +
colorTable + ' ' +
outdir + '/check.tif -of GTiff -b 1 -compute_edges'])
# with custom matching mode
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'COLOR_TABLE': colorTable,
'MATCH_MODE': 1,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'color-relief ' +
source + ' ' +
colorTable + ' ' +
outdir + '/check.tif -of GTiff -b 1 -nearest_color_entry'])
# with creation options
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'COLOR_TABLE': colorTable,
'MATCH_MODE': 1,
'OPTIONS': 'COMPRESS=JPEG|JPEG_QUALITY=75',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'color-relief ' +
source + ' ' +
colorTable + ' ' +
outdir + '/check.tif -of GTiff -b 1 -nearest_color_entry -co COMPRESS=JPEG -co JPEG_QUALITY=75'])
# with additional parameter
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'COLOR_TABLE': colorTable,
'EXTRA': '-alpha -q',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'color-relief ' +
source + ' ' +
colorTable + ' ' +
outdir + '/check.tif -of GTiff -b 1 -alpha -q'])
def testProximity(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = proximity()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# without NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_proximity.py',
'-srcband 1 -distunits PIXEL -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 9999,
'BAND': 2,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_proximity.py',
'-srcband 2 -distunits PIXEL -nodata 9999.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'BAND': 1,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_proximity.py',
'-srcband 1 -distunits PIXEL -nodata 0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'EXTRA': '-dstband 2 -values 3,4,12',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_proximity.py',
'-srcband 1 -distunits PIXEL -ot Float32 -of JPEG -dstband 2 -values 3,4,12 ' +
source + ' ' +
outdir + '/check.jpg'])
def testRasterize(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'polys.gml')
alg = rasterize()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# with no NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'FIELD': 'id',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_rasterize',
'-l polys2 -a id -ts 0.0 0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 9999,
'FIELD': 'id',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_rasterize',
'-l polys2 -a id -ts 0.0 0.0 -a_nodata 9999.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" INIT value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'INIT': 0,
'FIELD': 'id',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_rasterize',
'-l polys2 -a id -ts 0.0 0.0 -init 0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'FIELD': 'id',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_rasterize',
'-l polys2 -a id -ts 0.0 0.0 -a_nodata 0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'FIELD': 'id',
'EXTRA': '-at -add',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_rasterize',
'-l polys2 -a id -ts 0.0 0.0 -ot Float32 -of JPEG -at -add ' +
source + ' ' +
outdir + '/check.jpg'])
def testRasterizeOver(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
raster = os.path.join(testDataPath, 'dem.tif')
vector = os.path.join(testDataPath, 'polys.gml')
alg = rasterize_over()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
self.assertEqual(
alg.getConsoleCommands({'INPUT': vector,
'FIELD': 'id',
'INPUT_RASTER': raster}, context, feedback),
['gdal_rasterize',
'-l polys2 -a id ' +
vector + ' ' + raster])
self.assertEqual(
alg.getConsoleCommands({'INPUT': vector,
'FIELD': 'id',
'ADD': True,
'INPUT_RASTER': raster}, context, feedback),
['gdal_rasterize',
'-l polys2 -a id -add ' +
vector + ' ' + raster])
self.assertEqual(
alg.getConsoleCommands({'INPUT': vector,
'FIELD': 'id',
'EXTRA': '-i',
'INPUT_RASTER': raster}, context, feedback),
['gdal_rasterize',
'-l polys2 -a id -i ' +
vector + ' ' + raster])
def testRasterizeOverFixed(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
raster = os.path.join(testDataPath, 'dem.tif')
vector = os.path.join(testDataPath, 'polys.gml')
alg = rasterize_over_fixed_value()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
self.assertEqual(
alg.getConsoleCommands({'INPUT': vector,
'BURN': 100,
'INPUT_RASTER': raster}, context, feedback),
['gdal_rasterize',
'-l polys2 -burn 100.0 ' +
vector + ' ' + raster])
self.assertEqual(
alg.getConsoleCommands({'INPUT': vector,
'BURN': 100,
'ADD': True,
'INPUT_RASTER': raster}, context, feedback),
['gdal_rasterize',
'-l polys2 -burn 100.0 -add ' +
vector + ' ' + raster])
self.assertEqual(
alg.getConsoleCommands({'INPUT': vector,
'BURN': 100,
'EXTRA': '-i',
'INPUT_RASTER': raster}, context, feedback),
['gdal_rasterize',
'-l polys2 -burn 100.0 -i ' +
vector + ' ' + raster])
def testRetile(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = retile()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
self.assertEqual(
alg.getConsoleCommands({'INPUT': [source],
'OUTPUT': outdir}, context, feedback),
['gdal_retile.py',
'-ps 256 256 -overlap 0 -levels 1 -r near -ot Float32 -targetDir {} '.format(outdir) +
source])
# with input srs
self.assertEqual(
alg.getConsoleCommands({'INPUT': [source],
'SOURCE_CRS': 'EPSG:3111',
'OUTPUT': outdir}, context, feedback),
['gdal_retile.py',
'-ps 256 256 -overlap 0 -levels 1 -s_srs EPSG:3111 -r near -ot Float32 -targetDir {} {}'.format(outdir, source)
])
# with target using proj string
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
self.assertEqual(
alg.getConsoleCommands({'INPUT': [source],
'SOURCE_CRS': custom_crs,
'OUTPUT': outdir}, context, feedback),
['gdal_retile.py',
'-ps 256 256 -overlap 0 -levels 1 -s_srs EPSG:20936 -r near -ot Float32 -targetDir {} {}'.format(outdir, source)
])
# with target using custom projection
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
self.assertEqual(
alg.getConsoleCommands({'INPUT': [source],
'SOURCE_CRS': custom_crs,
'OUTPUT': outdir}, context, feedback),
['gdal_retile.py',
'-ps 256 256 -overlap 0 -levels 1 -s_srs "+proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs" -r near -ot Float32 -targetDir {} {}'.format(outdir, source)
])
# with non-EPSG crs code
self.assertEqual(
alg.getConsoleCommands({'INPUT': [source],
'SOURCE_CRS': 'POSTGIS:3111',
'OUTPUT': outdir}, context, feedback),
['gdal_retile.py',
'-ps 256 256 -overlap 0 -levels 1 -s_srs EPSG:3111 -r near -ot Float32 -targetDir {} {}'.format(outdir, source)
])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': [source],
'EXTRA': '-v -tileIndex tindex.shp',
'OUTPUT': outdir}, context, feedback),
['gdal_retile.py',
'-ps 256 256 -overlap 0 -levels 1 -r near -ot Float32 -v -tileIndex tindex.shp -targetDir {} '.format(outdir) +
source])
def testWarp(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = warp()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# with no NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': 'EPSG:3111',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs EPSG:3111 -r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with None NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': None,
'SOURCE_CRS': 'EPSG:3111',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs EPSG:3111 -r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 9999,
'SOURCE_CRS': 'EPSG:3111',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs EPSG:3111 -dstnodata 9999.0 -r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'SOURCE_CRS': 'EPSG:3111',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs EPSG:3111 -dstnodata 0.0 -r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value and custom data type
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'DATA_TYPE': 6,
'SOURCE_CRS': 'EPSG:3111',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs EPSG:3111 -dstnodata 0.0 -r near -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with target using EPSG
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': 'EPSG:3111',
'TARGET_CRS': 'EPSG:4326',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs EPSG:3111 -t_srs EPSG:4326 -r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with target using proj string
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': custom_crs,
'TARGET_CRS': custom_crs,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs EPSG:20936 -t_srs EPSG:20936 -r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with target using custom projection
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': custom_crs,
'TARGET_CRS': custom_crs,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs "+proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs" -t_srs "+proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs" -r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with target using custom projection and user-defined extent
custom_crs2 = 'proj4: +proj=longlat +a=6378388 +b=6356912 +no_defs'
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': custom_crs2,
'TARGET_CRS': custom_crs2,
'TARGET_EXTENT': '18.67,18.70,45.78,45.81',
'TARGET_EXTENT_CRS': custom_crs2,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdalwarp',
'-s_srs "+proj=longlat +a=6378388 +b=6356912 +no_defs" -t_srs "+proj=longlat +a=6378388 +b=6356912 +no_defs" -r near -te 18.67 45.78 18.7 45.81 -te_srs "+proj=longlat +a=6378388 +b=6356912 +no_defs" -of GTiff ' +
source + ' ' +
outdir + '/check.tif'])
# with non-EPSG crs code
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': 'POSTGIS:3111',
'TARGET_CRS': 'POSTGIS:3111',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs EPSG:3111 -t_srs EPSG:3111 -r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
            # with target resolution set to None (ignored)
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': 'EPSG:3111',
'TARGET_RESOLUTION': None,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs EPSG:3111 -r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# test target resolution with a valid value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': 'EPSG:3111',
'TARGET_RESOLUTION': 10.0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs EPSG:3111 -tr 10.0 10.0 -r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# test target resolution with a value of zero, to be ignored
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': 'EPSG:3111',
'TARGET_RESOLUTION': 0.0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs EPSG:3111 -r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with additional command-line parameter
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-dstalpha',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-r near -of JPEG -dstalpha ' +
source + ' ' +
outdir + '/check.jpg'])
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-dstalpha -srcnodata -9999',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-r near -of JPEG -dstalpha -srcnodata -9999 ' +
source + ' ' +
outdir + '/check.jpg'])
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-dstalpha -srcnodata "-9999 -8888"',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-r near -of JPEG -dstalpha -srcnodata "-9999 -8888" ' +
source + ' ' +
outdir + '/check.jpg'])
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
def testMerge(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = [os.path.join(testDataPath, 'dem1.tif'), os.path.join(testDataPath, 'dem1.tif')]
alg = merge()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
            # this algorithm writes the input layers to a temporary text file (--optfile),
            # so we strip its random temp path here, leaving only the filename
cmd = alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/check.tif'}, context, feedback)
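            # drop the random temporary directory from the --optfile path so the
            # command string can be compared verbatim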
t = cmd[1]
cmd[1] = t[:t.find('--optfile') + 10] + t[t.find('mergeInputFiles.txt'):]
self.assertEqual(cmd,
['gdal_merge.py',
'-ot Float32 -of GTiff ' +
'-o ' + outdir + '/check.tif ' +
'--optfile mergeInputFiles.txt'])
# separate
cmd = alg.getConsoleCommands({'INPUT': source,
'SEPARATE': True,
'OUTPUT': outdir + '/check.tif'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('--optfile') + 10] + t[t.find('mergeInputFiles.txt'):]
self.assertEqual(cmd,
['gdal_merge.py',
'-separate -ot Float32 -of GTiff ' +
'-o ' + outdir + '/check.tif ' +
'--optfile mergeInputFiles.txt'])
            # additional parameters
cmd = alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-tap -ps 0.1 0.1',
'OUTPUT': outdir + '/check.tif'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('--optfile') + 10] + t[t.find('mergeInputFiles.txt'):]
self.assertEqual(cmd,
['gdal_merge.py',
'-ot Float32 -of GTiff -tap -ps 0.1 0.1 ' +
'-o ' + outdir + '/check.tif ' +
'--optfile mergeInputFiles.txt'])
            # assign output nodata value
cmd = alg.getConsoleCommands({'INPUT': source,
'NODATA_OUTPUT': -9999,
'OUTPUT': outdir + '/check.tif'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('--optfile') + 10] + t[t.find('mergeInputFiles.txt'):]
self.assertEqual(cmd,
['gdal_merge.py',
'-a_nodata -9999 -ot Float32 -of GTiff ' +
'-o ' + outdir + '/check.tif ' +
'--optfile mergeInputFiles.txt'])
def testNearblack(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = nearblack()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# defaults
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['nearblack',
source + ' -of GTiff -o ' + outdir + '/check.tif ' +
'-near 15'])
# search white pixels
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'WHITE': True,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['nearblack',
source + ' -of GTiff -o ' + outdir + '/check.tif ' +
'-near 15 -white'])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-nb 5 -setalpha',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['nearblack',
source + ' -of GTiff -o ' + outdir + '/check.tif ' +
'-near 15 -nb 5 -setalpha'])
# additional parameters and creation options
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OPTIONS': 'COMPRESS=JPEG|JPEG_QUALITY=75',
'EXTRA': '-nb 5 -setalpha',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['nearblack',
source + ' -of GTiff -o ' + outdir + '/check.tif ' +
'-near 15 -co COMPRESS=JPEG -co JPEG_QUALITY=75 -nb 5 -setalpha'])
def testRearrangeBands(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
with tempfile.TemporaryDirectory() as outdir:
outsource = outdir + '/check.tif'
alg = rearrange_bands()
alg.initAlgorithm()
# single band
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BANDS': 1,
'OUTPUT': outsource}, context, feedback),
['gdal_translate', '-b 1 ' +
'-of GTiff ' +
source + ' ' + outsource])
# three bands, re-ordered
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BANDS': [3, 2, 1],
'OUTPUT': outsource}, context, feedback),
['gdal_translate', '-b 3 -b 2 -b 1 ' +
'-of GTiff ' +
source + ' ' + outsource])
# three bands, re-ordered with custom data type
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BANDS': [3, 2, 1],
'DATA_TYPE': 6,
'OUTPUT': outsource}, context, feedback),
['gdal_translate', '-b 3 -b 2 -b 1 ' +
'-ot Float32 -of GTiff ' +
source + ' ' + outsource])
def testFillnodata(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
mask = os.path.join(testDataPath, 'raster.tif')
with tempfile.TemporaryDirectory() as outdir:
outsource = outdir + '/check.tif'
alg = fillnodata()
alg.initAlgorithm()
# with mask value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'DISTANCE': 10,
'ITERATIONS': 0,
'MASK_LAYER': mask,
'NO_MASK': False,
'OUTPUT': outsource}, context, feedback),
['gdal_fillnodata.py',
'-md 10 -b 1 -mask ' +
mask +
' -of GTiff ' +
source + ' ' +
outsource])
# without mask value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'DISTANCE': 10,
'ITERATIONS': 0,
'NO_MASK': False,
'OUTPUT': outsource}, context, feedback),
['gdal_fillnodata.py',
'-md 10 -b 1 ' +
'-of GTiff ' +
source + ' ' +
outsource])
            # disable the default validity mask with -nomask
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'DISTANCE': 10,
'ITERATIONS': 0,
'NO_MASK': True,
'OUTPUT': outsource}, context, feedback),
['gdal_fillnodata.py',
'-md 10 -b 1 -nomask ' +
'-of GTiff ' +
source + ' ' +
outsource])
# creation options
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'OPTIONS': 'COMPRESS=JPEG|JPEG_QUALITY=75',
'OUTPUT': outsource}, context, feedback),
['gdal_fillnodata.py',
'-md 10 -b 1 -of GTiff -co COMPRESS=JPEG -co JPEG_QUALITY=75 ' +
source + ' ' +
outsource])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'EXTRA': '-q',
'OUTPUT': outsource}, context, feedback),
['gdal_fillnodata.py',
'-md 10 -b 1 -of GTiff -q ' +
source + ' ' +
outsource])
def testGdalAddo(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
with tempfile.TemporaryDirectory() as outdir:
alg = gdaladdo()
alg.initAlgorithm()
# defaults
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'LEVELS': '2 4 8 16',
'CLEAN': False,
'RESAMPLING': 0,
'FORMAT': 0}, context, feedback),
['gdaladdo',
source + ' ' + '-r nearest 2 4 8 16'])
# with "clean" option
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'LEVELS': '2 4 8 16',
'CLEAN': True,
'RESAMPLING': 0,
'FORMAT': 0}, context, feedback),
['gdaladdo',
source + ' ' + '-r nearest -clean 2 4 8 16'])
# ovr format
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'LEVELS': '2 4 8 16',
'CLEAN': False,
'RESAMPLING': 0,
'FORMAT': 1}, context, feedback),
['gdaladdo',
source + ' ' + '-r nearest -ro 2 4 8 16'])
# Erdas format
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'LEVELS': '2 4 8 16',
'CLEAN': False,
'RESAMPLING': 0,
'FORMAT': 2}, context, feedback),
['gdaladdo',
source + ' ' + '-r nearest --config USE_RRD YES 2 4 8 16'])
            # custom resampling method
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'LEVELS': '2 4 8 16',
'CLEAN': False,
'RESAMPLING': 4,
'FORMAT': 0}, context, feedback),
['gdaladdo',
source + ' ' + '-r cubicspline 2 4 8 16'])
# more levels
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'LEVELS': '2 4 8 16 32 64',
'CLEAN': False,
'RESAMPLING': 0,
'FORMAT': 0}, context, feedback),
['gdaladdo',
source + ' ' + '-r nearest 2 4 8 16 32 64'])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'LEVELS': '2 4 8 16',
'CLEAN': False,
'EXTRA': '--config COMPRESS_OVERVIEW JPEG'}, context, feedback),
['gdaladdo',
source + ' ' + '--config COMPRESS_OVERVIEW JPEG 2 4 8 16'])
if GdalUtils.version() >= 230000:
# without levels
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'CLEAN': False}, context, feedback),
['gdaladdo',
source])
# without advanced params
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'LEVELS': '2 4 8 16',
'CLEAN': False}, context, feedback),
['gdaladdo',
source + ' ' + '2 4 8 16'])
def testSieve(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
mask = os.path.join(testDataPath, 'raster.tif')
with tempfile.TemporaryDirectory() as outdir:
outsource = outdir + '/check.tif'
alg = sieve()
alg.initAlgorithm()
# defaults
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outsource}, context, feedback),
['gdal_sieve.py',
'-st 10 -4 -of GTiff ' +
source + ' ' +
outsource])
# Eight connectedness and custom threshold
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'THRESHOLD': 16,
'EIGHT_CONNECTEDNESS': True,
'OUTPUT': outsource}, context, feedback),
['gdal_sieve.py',
'-st 16 -8 -of GTiff ' +
source + ' ' +
outsource])
# without default mask layer
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NO_MASK': True,
'OUTPUT': outsource}, context, feedback),
['gdal_sieve.py',
'-st 10 -4 -nomask -of GTiff ' +
source + ' ' +
outsource])
# defaults with external validity mask
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MASK_LAYER': mask,
'OUTPUT': outsource}, context, feedback),
['gdal_sieve.py',
'-st 10 -4 -mask ' +
mask +
' -of GTiff ' +
source + ' ' +
outsource])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-q',
'OUTPUT': outsource}, context, feedback),
['gdal_sieve.py',
'-st 10 -4 -of GTiff -q ' +
source + ' ' +
outsource])
def testGdal2Xyz(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
with tempfile.TemporaryDirectory() as outdir:
outsource = outdir + '/check.csv'
alg = gdal2xyz()
alg.initAlgorithm()
# defaults
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'CSV': False,
'OUTPUT': outsource}, context, feedback),
['gdal2xyz.py',
'-band 1 ' +
source + ' ' +
outsource])
# csv output
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'CSV': True,
'OUTPUT': outsource}, context, feedback),
['gdal2xyz.py',
'-band 1 -csv ' +
source + ' ' +
outsource])
def testGdalPolygonize(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
with tempfile.TemporaryDirectory() as outdir:
outsource = outdir + '/check.shp'
alg = polygonize()
alg.initAlgorithm()
# defaults
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'FIELD': 'DN',
'EIGHT_CONNECTEDNESS': False,
'OUTPUT': outsource}, context, feedback),
['gdal_polygonize.py',
source + ' ' +
outsource + ' ' +
'-b 1 -f "ESRI Shapefile" check DN'
])
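            # custom field name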
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'FIELD': 'VAL',
'EIGHT_CONNECTEDNESS': False,
'OUTPUT': outsource}, context, feedback),
['gdal_polygonize.py',
source + ' ' +
outsource + ' ' +
'-b 1 -f "ESRI Shapefile" check VAL'
])
# 8 connectedness
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'FIELD': 'DN',
'EIGHT_CONNECTEDNESS': True,
'OUTPUT': outsource}, context, feedback),
['gdal_polygonize.py',
source + ' ' +
outsource + ' ' +
'-8 -b 1 -f "ESRI Shapefile" check DN'
])
# custom output format
outsource = outdir + '/check.gpkg'
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'FIELD': 'DN',
'EIGHT_CONNECTEDNESS': False,
'OUTPUT': outsource}, context, feedback),
['gdal_polygonize.py',
source + ' ' +
outsource + ' ' +
'-b 1 -f "GPKG" check DN'
])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'FIELD': 'DN',
'EXTRA': '-nomask -q',
'OUTPUT': outsource}, context, feedback),
['gdal_polygonize.py',
source + ' ' +
outsource + ' ' +
'-b 1 -f "GPKG" -nomask -q check DN'
])
def testGdalPansharpen(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
panchrom = os.path.join(testDataPath, 'dem.tif')
spectral = os.path.join(testDataPath, 'raster.tif')
with tempfile.TemporaryDirectory() as outdir:
outsource = outdir + '/out.tif'
alg = pansharp()
alg.initAlgorithm()
# defaults
self.assertEqual(
alg.getConsoleCommands({'SPECTRAL': spectral,
'PANCHROMATIC': panchrom,
'OUTPUT': outsource}, context, feedback),
['gdal_pansharpen.py',
panchrom + ' ' +
spectral + ' ' +
outsource + ' ' +
'-r cubic -of GTiff'
])
# custom resampling
self.assertEqual(
alg.getConsoleCommands({'SPECTRAL': spectral,
'PANCHROMATIC': panchrom,
'RESAMPLING': 4,
'OUTPUT': outsource}, context, feedback),
['gdal_pansharpen.py',
panchrom + ' ' +
spectral + ' ' +
outsource + ' ' +
'-r lanczos -of GTiff'
])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'SPECTRAL': spectral,
'PANCHROMATIC': panchrom,
'EXTRA': '-bitdepth 12 -threads ALL_CPUS',
'OUTPUT': outsource}, context, feedback),
['gdal_pansharpen.py',
panchrom + ' ' +
spectral + ' ' +
outsource + ' ' +
'-r cubic -of GTiff -bitdepth 12 -threads ALL_CPUS'
])
def testGdalViewshed(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
dem = os.path.join(testDataPath, 'dem.tif')
with tempfile.TemporaryDirectory() as outdir:
outsource = outdir + '/out.tif'
alg = viewshed()
alg.initAlgorithm()
# defaults
self.assertEqual(
alg.getConsoleCommands({'INPUT': dem,
'BAND': 1,
'OBSERVER': '18.67274,45.80599',
'OUTPUT': outsource}, context, feedback),
['gdal_viewshed',
'-b 1 -ox 18.67274 -oy 45.80599 -oz 1.0 -tz 1.0 -md 100.0 -f GTiff ' +
dem + ' ' + outsource
])
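            # custom band, observer/target heights and max distance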
self.assertEqual(
alg.getConsoleCommands({'INPUT': dem,
'BAND': 2,
'OBSERVER': '18.67274,45.80599',
'OBSERVER_HEIGHT': 1.8,
'TARGET_HEIGHT': 20,
'MAX_DISTANCE': 1000,
'OUTPUT': outsource}, context, feedback),
['gdal_viewshed',
'-b 2 -ox 18.67274 -oy 45.80599 -oz 1.8 -tz 20.0 -md 1000.0 -f GTiff ' +
dem + ' ' + outsource
])
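            # additional parameters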
self.assertEqual(
alg.getConsoleCommands({'INPUT': dem,
'BAND': 1,
'OBSERVER': '18.67274,45.80599',
'EXTRA': '-a_nodata=-9999 -cc 0.2',
'OUTPUT': outsource}, context, feedback),
['gdal_viewshed',
'-b 1 -ox 18.67274 -oy 45.80599 -oz 1.0 -tz 1.0 -md 100.0 -f GTiff ' +
'-a_nodata=-9999 -cc 0.2 ' + dem + ' ' + outsource
])
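            # creation options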
self.assertEqual(
alg.getConsoleCommands({'INPUT': dem,
'BAND': 1,
'OBSERVER': '18.67274,45.80599',
'OPTIONS': 'COMPRESS=DEFLATE|PREDICTOR=2|ZLEVEL=9',
'OUTPUT': outsource}, context, feedback),
['gdal_viewshed',
'-b 1 -ox 18.67274 -oy 45.80599 -oz 1.0 -tz 1.0 -md 100.0 -f GTiff ' +
'-co COMPRESS=DEFLATE -co PREDICTOR=2 -co ZLEVEL=9 ' + dem + ' ' + outsource
])
def testBuildVrt(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = buildvrt()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# defaults
cmd = alg.getConsoleCommands({'INPUT': [source],
'OUTPUT': outdir + '/check.vrt'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):]
self.assertEqual(cmd,
['gdalbuildvrt',
'-resolution average -separate -r nearest ' +
'-input_file_list buildvrtInputFiles.txt ' +
outdir + '/check.vrt'])
# custom resolution
cmd = alg.getConsoleCommands({'INPUT': [source],
'RESOLUTION': 2,
'OUTPUT': outdir + '/check.vrt'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):]
self.assertEqual(cmd,
['gdalbuildvrt',
'-resolution lowest -separate -r nearest ' +
'-input_file_list buildvrtInputFiles.txt ' +
outdir + '/check.vrt'])
# single layer
cmd = alg.getConsoleCommands({'INPUT': [source],
'SEPARATE': False,
'OUTPUT': outdir + '/check.vrt'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):]
self.assertEqual(cmd,
['gdalbuildvrt',
'-resolution average -r nearest ' +
'-input_file_list buildvrtInputFiles.txt ' +
outdir + '/check.vrt'])
# projection difference
cmd = alg.getConsoleCommands({'INPUT': [source],
'PROJ_DIFFERENCE': True,
'OUTPUT': outdir + '/check.vrt'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):]
self.assertEqual(cmd,
['gdalbuildvrt',
'-resolution average -separate -allow_projection_difference -r nearest ' +
'-input_file_list buildvrtInputFiles.txt ' +
outdir + '/check.vrt'])
# add alpha band
cmd = alg.getConsoleCommands({'INPUT': [source],
'ADD_ALPHA': True,
'OUTPUT': outdir + '/check.vrt'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):]
self.assertEqual(cmd,
['gdalbuildvrt',
'-resolution average -separate -addalpha -r nearest ' +
'-input_file_list buildvrtInputFiles.txt ' +
outdir + '/check.vrt'])
# assign CRS
cmd = alg.getConsoleCommands({'INPUT': [source],
'ASSIGN_CRS': 'EPSG:3111',
'OUTPUT': outdir + '/check.vrt'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):]
self.assertEqual(cmd,
['gdalbuildvrt',
'-resolution average -separate -a_srs EPSG:3111 -r nearest ' +
'-input_file_list buildvrtInputFiles.txt ' +
outdir + '/check.vrt'])
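            # assign custom CRS given as a proj4 string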
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
cmd = alg.getConsoleCommands({'INPUT': [source],
'ASSIGN_CRS': custom_crs,
'OUTPUT': outdir + '/check.vrt'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):]
self.assertEqual(cmd,
['gdalbuildvrt',
'-resolution average -separate -a_srs EPSG:20936 -r nearest ' +
'-input_file_list buildvrtInputFiles.txt ' +
outdir + '/check.vrt'])
# source NODATA
cmd = alg.getConsoleCommands({'INPUT': [source],
'SRC_NODATA': '-9999',
'OUTPUT': outdir + '/check.vrt'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):]
self.assertEqual(cmd,
['gdalbuildvrt',
'-resolution average -separate -r nearest -srcnodata "-9999" ' +
'-input_file_list buildvrtInputFiles.txt ' +
outdir + '/check.vrt'])
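            # multiple source NODATA values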
cmd = alg.getConsoleCommands({'INPUT': [source],
'SRC_NODATA': '-9999 9999',
'OUTPUT': outdir + '/check.vrt'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):]
self.assertEqual(cmd,
['gdalbuildvrt',
'-resolution average -separate -r nearest -srcnodata "-9999 9999" ' +
'-input_file_list buildvrtInputFiles.txt ' +
outdir + '/check.vrt'])
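            # empty source NODATA is ignored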
cmd = alg.getConsoleCommands({'INPUT': [source],
'SRC_NODATA': '',
'OUTPUT': outdir + '/check.vrt'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):]
self.assertEqual(cmd,
['gdalbuildvrt',
'-resolution average -separate -r nearest ' +
'-input_file_list buildvrtInputFiles.txt ' +
outdir + '/check.vrt'])
# additional parameters
cmd = alg.getConsoleCommands({'INPUT': [source],
'EXTRA': '-overwrite -optim RASTER -vrtnodata -9999',
'OUTPUT': outdir + '/check.vrt'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):]
self.assertEqual(cmd,
['gdalbuildvrt',
'-resolution average -separate -r nearest -overwrite -optim RASTER -vrtnodata -9999 ' +
'-input_file_list buildvrtInputFiles.txt ' +
outdir + '/check.vrt'])
if __name__ == '__main__':
nose2.main()
| gpl-2.0 | -3,146,958,377,692,507,600 | 46.808046 | 240 | 0.416785 | false | 4.772576 | true | false | false |
caedesvvv/metanomon | metanomon/nomon.py | 1 | 13700 | #!/usr/bin/env python
import gtk
import pango
from kiwi.ui.gadgets import quit_if_last
from kiwi.ui.delegates import GladeDelegate
from kiwi.ui.objectlist import ObjectList, Column, ObjectTree
from xmlrpclib import ServerProxy
from urllib import urlencode
import gtkmozembed
import gtksourceview
#import gtksourceview2 as gtksourceview
#import gtkhtml2
#import simplebrowser
from buffer import DokuwikiBuffer
dialog_buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
gtk.STOCK_OK, gtk.RESPONSE_ACCEPT)
class ModalDialog(gtk.Dialog):
def __init__(self, title):
gtk.Dialog.__init__(self, title = title,
flags = gtk.DIALOG_MODAL,
buttons = dialog_buttons)
# wrappers for kiwi treeview widgets
class Section(object):
def __init__(self, name, id=None):
self.name = name
if id:
self.id = id
else:
self.id = name
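# wrap a dict (e.g. an XML-RPC result) so its keys can be read as attributes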
class DictWrapper(object):
def __init__(self, obj, id=None):
self._obj = obj
if id:
self.name = id
def __getattr__(self, name):
try:
return self._obj[name]
except:
raise AttributeError
# function to set up some simple style tags
def setup_tags(table):
    for i, tag_name in enumerate(['h1', 'h2', 'h3', 'h4', 'h5', 'h6']):
        header_tag = gtk.TextTag(tag_name)
        header_tag.set_property('size-points', 20 - i * 2)
        header_tag.set_property('weight', 700)
        table.add(header_tag)
tag_bold = gtk.TextTag('bold')
tag_bold.set_property('weight', 700)
table.add(tag_bold)
tag_italic = gtk.TextTag('italic')
tag_italic.set_property('style', pango.STYLE_ITALIC)
table.add(tag_italic)
# setup the tag table
#table = gtk.TextTagTable()
table = gtksourceview.SourceTagTable()
setup_tags(table)
# main application classes
class DokuwikiView(GladeDelegate):
"""
A dokuwiki editor window
"""
def __init__(self):
GladeDelegate.__init__(self, gladefile="pydoku",
delete_handler=self.quit_if_last)
self.setup_wikitree()
self.setup_attachments()
self.setup_side()
self.setup_sourceview()
self.setup_htmlview()
self.page_edit = self.view.notebook1.get_nth_page(0)
self.page_view = self.view.notebook1.get_nth_page(1)
self.page_attach = self.view.notebook1.get_nth_page(2)
self.show_all()
def quit_if_last(self, *args):
        self.htmlview.destroy()  # for some reason this has to be destroyed explicitly
GladeDelegate.quit_if_last(self)
# general interface functions
def post(self, text):
id = self.view.statusbar.get_context_id("zap")
self.view.statusbar.push(id, text)
# setup functions
def setup_side(self):
columns = ['user', 'sum', 'type', 'version', 'ip']
columns = [Column(s) for s in columns]
self.versionlist = ObjectList(columns)
self.view.side_vbox.pack_start(gtk.Label('Version Log:'), False, False)
self.view.side_vbox.add(self.versionlist)
self.view.side_vbox.pack_start(gtk.Label('BackLinks:'), False, False)
self.backlinks = ObjectList([Column('name')])
self.view.side_vbox.add(self.backlinks)
def setup_attachments(self):
columns = ['id', 'size', 'lastModified', 'writable', 'isimg', 'perms']
columns = [Column(s) for s in columns]
self.attachmentlist = ObjectList(columns)
self.view.attachments_vbox.add(self.attachmentlist)
def setup_wikitree(self):
columns = ['name', 'id', 'lastModified', 'perms', 'size']
columns = [Column(s) for s in columns]
self.objectlist = ObjectTree(columns)
self.objectlist.connect("selection-changed", self.selected)
self.view.vbox2.add(self.objectlist)
def setup_htmlview(self):
self.htmlview = gtkmozembed.MozEmbed()
self.view.html_scrolledwindow.add(self.htmlview)
self.htmlview.realize()
self.htmlview.show()
def setup_sourceview(self):
self.buffer = DokuwikiBuffer(table)
self.editor = gtksourceview.SourceView(self.buffer)
accel_group = gtk.AccelGroup()
self.get_toplevel().add_accel_group(accel_group)
self.editor.add_accelerator("paste-clipboard", accel_group, ord('v'), gtk.gdk.CONTROL_MASK, 0)
self.editor.add_accelerator("copy-clipboard", accel_group, ord('c'), gtk.gdk.CONTROL_MASK, 0)
self.editor.add_accelerator("cut-clipboard", accel_group, ord('x'), gtk.gdk.CONTROL_MASK, 0)
#self.editor = gtk.TextView(self.buffer)
self.editor.set_left_margin(5)
self.editor.set_right_margin(5)
self.editor.set_wrap_mode(gtk.WRAP_WORD_CHAR)
self.view.scrolledwindow1.add(self.editor)
# dokuwiki operations
def get_version(self):
version = self._rpc.dokuwiki.getVersion()
self.view.version.set_text(version)
def get_pagelist(self):
pages = self._rpc.wiki.getAllPages()
self._sections = {}
self.objectlist.clear()
for page in pages:
self.add_page(page)
self.view.new_page.set_sensitive(True)
self.view.delete_page.set_sensitive(True)
def get_attachments(self, ns):
attachments = self._rpc.wiki.getAttachments(ns, {})
attachments = [DictWrapper(s) for s in attachments]
self.attachmentlist.add_list(attachments)
def get_backlinks(self, pagename):
backlinks = self._rpc.wiki.getBackLinks(pagename)
backlinks = [Section(s) for s in backlinks]
self.backlinks.add_list(backlinks)
def get_versions(self, pagename):
versionlist = self._rpc.wiki.getPageVersions(pagename, 0)
versionlist = [DictWrapper(s) for s in versionlist]
self.versionlist.add_list(versionlist)
def get_htmlview(self, pagename):
text = self._rpc.wiki.getPageHTML(pagename)
self.htmlview.render_data(text, len(text), self.url.get_text(), 'text/html')
# XXX following is for gtkhtml (not used)
#self.document.clear()
#self.document.open_stream('text/html')
#self.document.write_stream(text)
#self.document.close_stream()
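    # save the current page back to the wiki; new pages are added to the tree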
def put_page(self, text, summary, minor):
pars = {}
if summary:
pars['sum'] = summary
if minor:
pars['minor'] = minor
self._rpc.wiki.putPage(self.current, text, pars)
if not self.current in self._sections:
self.add_page({"id":self.current})
# put a page into the page tree
def add_page(self, page):
name = page["id"]
path = name.split(":")
prev = None
for i,pathm in enumerate(path):
if i == len(path)-1: # a page
new = DictWrapper(page, pathm)
self._sections[name] = new
self.objectlist.append(prev, new, False)
else: # a namespace
part_path = ":".join(path[:i+1])
if not part_path in self._sections:
new = Section(pathm, part_path)
self._sections[part_path] = new
self.objectlist.append(prev, new, False)
else:
new = self._sections[part_path]
prev = new
# page selected callback
def selected(self, widget, object):
if not object: return
if isinstance(object, Section):
self.get_attachments(object.id)
if not isinstance(object, DictWrapper): return
text = self._rpc.wiki.getPage(object.id)
self.current = object.id
self.buffer.add_text(text)
self.get_htmlview(self.current)
self.get_backlinks(object.id)
self.get_versions(object.id)
# kiwi interface callbacks
def on_view_edit__toggled(self, widget):
if widget.get_active():
self.notebook1.insert_page(self.page_edit, gtk.Label('edit'), 0)
else:
self.notebook1.remove_page(self.notebook1.page_num(self.page_edit))
def on_view_view__toggled(self, widget):
if widget.get_active():
self.notebook1.insert_page(self.page_view, gtk.Label('view'), 1)
else:
self.notebook1.remove_page(self.notebook1.page_num(self.page_view))
def on_view_attachments__toggled(self, widget):
if widget.get_active():
self.notebook1.insert_page(self.page_attach, gtk.Label('attach'))
else:
self.notebook1.remove_page(self.notebook1.page_num(self.page_attach))
def on_view_extra__toggled(self, widget):
if widget.get_active():
self.backlinks.show()
self.versionlist.show()
self.view.hpaned2.set_position(self._prevpos)
else:
self.backlinks.hide()
self.versionlist.hide()
self._prevpos = self.view.hpaned2.get_position()
self.view.hpaned2.set_position(self.view.hpaned2.allocation.width)
def on_button_list__clicked(self, *args):
self.post("Connecting...")
dialog = ModalDialog("User Details")
# prepare
widgets = {}
items = ["user", "password"]
for i,item in enumerate(items):
widgets[item] = gtk.Entry()
if i == 1:
widgets[item].set_visibility(False)
hbox = gtk.HBox()
hbox.pack_start(gtk.Label(item+': '))
hbox.add(widgets[item])
dialog.vbox.add(hbox)
dialog.show_all()
# run
response = dialog.run()
user = widgets['user'].get_text()
password = widgets['password'].get_text()
dialog.destroy()
if not response == gtk.RESPONSE_ACCEPT: return
# following commented line is for gtkhtml (not used)
#simplebrowser.currentUrl = self.view.url.get_text()
# handle response
params = urlencode({'u':user,'p':password})
fullurl = self.view.url.get_text() + "/lib/exe/xmlrpc.php?"+ params
self._rpc = ServerProxy(fullurl)
try:
self.get_version()
        except Exception:
            self.post("Failure to connect")
            return
self.get_pagelist()
self.post("Connected")
def on_delete_page__clicked(self, *args):
dialog = ModalDialog("Are you sure?")
response = dialog.run()
if response == gtk.RESPONSE_ACCEPT:
value = self._sections[self.current]
sel = self.objectlist.remove(value)
self._rpc.wiki.putPage(self.current, "", {})
self.current = None
dialog.destroy()
def on_new_page__clicked(self, *args):
dialog = ModalDialog("Name for the new page")
text_w = gtk.Entry()
text_w.show()
dialog.vbox.add(text_w)
response = dialog.run()
if response == gtk.RESPONSE_ACCEPT:
text = text_w.get_text()
if text:
self.current = text
dialog.destroy()
def on_button_h1__clicked(self, *args):
self.buffer.set_style('h1')
def on_button_h2__clicked(self, *args):
self.buffer.set_style('h2')
def on_button_h3__clicked(self, *args):
self.buffer.set_style('h3')
def on_button_h4__clicked(self, *args):
self.buffer.set_style('h4')
def on_button_h5__clicked(self, *args):
self.buffer.set_style('h5')
def on_button_h6__clicked(self, *args):
self.buffer.set_style('h6')
def on_button_bold__clicked(self, *args):
self.buffer.set_style('bold')
def on_button_italic__clicked(self, *args):
self.buffer.set_style('italic')
def on_button_clear_style__clicked(self, *args):
self.buffer.clear_style()
def on_button_save__clicked(self, *args):
self.post("Saving...")
dialog = ModalDialog("Commit message")
entry = gtk.Entry()
minor = gtk.CheckButton("Minor")
dialog.vbox.add(gtk.Label("Your attention to detail\nIs greatly appreciated"))
dialog.vbox.add(entry)
dialog.vbox.add(minor)
dialog.show_all()
response = dialog.run()
if response == gtk.RESPONSE_ACCEPT:
text = self.buffer.process_text()
self.put_page(text, entry.get_text(), minor.get_active())
self.get_htmlview(self.current)
self.get_versions(self.current)
self.post("Saved")
dialog.destroy()
# unused stuff
def request_url(self, document, url, stream):
f = simplebrowser.open_url(url)
stream.write(f.read())
def setup_htmlview_gtkhtml(self):
# XXX not used now
self.document = gtkhtml2.Document()
self.document.connect('request_url', self.request_url)
self.htmlview = gtkhtml2.View()
self.htmlview.set_document(self.document)
def setup_sourceview_gtksourceview(self):
# XXX not used now
self.buffer = gtksourceview.Buffer(table)
self.editor = gtksourceview.View(self.buffer)
if True:
self.editor.set_show_line_numbers(True)
lm = gtksourceview.LanguageManager()
self.editor.set_indent_on_tab(True)
self.editor.set_indent_width(4)
self.editor.set_property("auto-indent", True)
self.editor.set_property("highlight-current-line", True)
self.editor.set_insert_spaces_instead_of_tabs(True)
lang = lm.get_language("python")
self.buffer.set_language(lang)
self.buffer.set_highlight_syntax(True)
if __name__ == "__main__":
app = DokuwikiView()
app.show()
gtk.main()
| gpl-3.0 | -249,630,894,693,975,870 | 33.59596 | 102 | 0.6 | false | 3.595801 | false | false | false |
cloudbase/maas | src/maasserver/tests/test_forms.py | 1 | 46400 | # Copyright 2012, 2013 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Test forms."""
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
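# "str" is hidden so that accidental use of byte strings fails fast.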
str = None
__metaclass__ = type
__all__ = []
import json
from django import forms
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.validators import validate_email
from django.http import QueryDict
from maasserver.enum import (
ARCHITECTURE,
ARCHITECTURE_CHOICES,
NODE_AFTER_COMMISSIONING_ACTION_CHOICES,
NODE_STATUS,
NODEGROUP_STATUS,
NODEGROUPINTERFACE_MANAGEMENT,
)
from maasserver.forms import (
AdminNodeForm,
AdminNodeWithMACAddressesForm,
BulkNodeActionForm,
CommissioningScriptForm,
ConfigForm,
DownloadProgressForm,
EditUserForm,
get_action_form,
get_node_create_form,
get_node_edit_form,
initialize_node_group,
INTERFACES_VALIDATION_ERROR_MESSAGE,
MACAddressForm,
NewUserCreationForm,
NodeActionForm,
NodeForm,
NodeGroupEdit,
NodeGroupInterfaceForm,
NodeGroupWithInterfacesForm,
NodeWithMACAddressesForm,
ProfileForm,
remove_None_values,
UnconstrainedMultipleChoiceField,
ValidatorMultipleChoiceField,
)
from maasserver.models import (
Config,
MACAddress,
Node,
NodeGroup,
NodeGroupInterface,
)
from maasserver.models.config import DEFAULT_CONFIG
from maasserver.node_action import (
Commission,
Delete,
StartNode,
StopNode,
UseCurtin,
)
from maasserver.testing import reload_object
from maasserver.testing.factory import factory
from maasserver.testing.testcase import MAASServerTestCase
from metadataserver.models import CommissioningScript
from netaddr import IPNetwork
from provisioningserver.enum import POWER_TYPE_CHOICES
from testtools.matchers import (
AllMatch,
Equals,
MatchesRegex,
MatchesStructure,
)
class TestHelpers(MAASServerTestCase):
def test_initialize_node_group_leaves_nodegroup_reference_intact(self):
preselected_nodegroup = factory.make_node_group()
node = factory.make_node(nodegroup=preselected_nodegroup)
initialize_node_group(node)
self.assertEqual(preselected_nodegroup, node.nodegroup)
def test_initialize_node_group_initializes_nodegroup_to_form_value(self):
node = Node(
NODE_STATUS.DECLARED,
architecture=factory.getRandomEnum(ARCHITECTURE))
nodegroup = factory.make_node_group()
initialize_node_group(node, nodegroup)
self.assertEqual(nodegroup, node.nodegroup)
def test_initialize_node_group_defaults_to_master(self):
node = Node(
NODE_STATUS.DECLARED,
architecture=factory.getRandomEnum(ARCHITECTURE))
initialize_node_group(node)
self.assertEqual(NodeGroup.objects.ensure_master(), node.nodegroup)
class NodeWithMACAddressesFormTest(MAASServerTestCase):
def get_QueryDict(self, params):
query_dict = QueryDict('', mutable=True)
for k, v in params.items():
if isinstance(v, list):
query_dict.setlist(k, v)
else:
query_dict[k] = v
return query_dict
def make_params(self, mac_addresses=None, architecture=None,
hostname=None, nodegroup=None):
if mac_addresses is None:
mac_addresses = [factory.getRandomMACAddress()]
if architecture is None:
architecture = factory.getRandomEnum(ARCHITECTURE)
if hostname is None:
hostname = factory.make_name('hostname')
params = {
'mac_addresses': mac_addresses,
'architecture': architecture,
'hostname': hostname,
}
if nodegroup is not None:
params['nodegroup'] = nodegroup
return self.get_QueryDict(params)
def test_NodeWithMACAddressesForm_valid(self):
architecture = factory.getRandomEnum(ARCHITECTURE)
form = NodeWithMACAddressesForm(
self.make_params(
mac_addresses=['aa:bb:cc:dd:ee:ff', '9a:bb:c3:33:e5:7f'],
architecture=architecture))
self.assertTrue(form.is_valid())
self.assertEqual(
['aa:bb:cc:dd:ee:ff', '9a:bb:c3:33:e5:7f'],
form.cleaned_data['mac_addresses'])
self.assertEqual(architecture, form.cleaned_data['architecture'])
def test_NodeWithMACAddressesForm_simple_invalid(self):
# If the form only has one (invalid) MAC address field to validate,
# the error message in form.errors['mac_addresses'] is the
# message from the field's validation error.
form = NodeWithMACAddressesForm(
self.make_params(mac_addresses=['invalid']))
self.assertFalse(form.is_valid())
self.assertEqual(['mac_addresses'], list(form.errors))
self.assertEqual(
['Enter a valid MAC address (e.g. AA:BB:CC:DD:EE:FF).'],
form.errors['mac_addresses'])
def test_NodeWithMACAddressesForm_multiple_invalid(self):
# If the form has multiple MAC address fields to validate,
# if one or more fields are invalid, a single error message is
# present in form.errors['mac_addresses'] after validation.
form = NodeWithMACAddressesForm(
self.make_params(mac_addresses=['invalid_1', 'invalid_2']))
self.assertFalse(form.is_valid())
self.assertEqual(['mac_addresses'], list(form.errors))
self.assertEqual(
['One or more MAC addresses is invalid.'],
form.errors['mac_addresses'])
def test_NodeWithMACAddressesForm_empty(self):
# Empty values in the list of MAC addresses are simply ignored.
form = NodeWithMACAddressesForm(
self.make_params(
mac_addresses=[factory.getRandomMACAddress(), '']))
self.assertTrue(form.is_valid())
def test_NodeWithMACAddressesForm_save(self):
macs = ['aa:bb:cc:dd:ee:ff', '9a:bb:c3:33:e5:7f']
form = NodeWithMACAddressesForm(self.make_params(mac_addresses=macs))
node = form.save()
self.assertIsNotNone(node.id) # The node is persisted.
self.assertSequenceEqual(
macs,
[mac.mac_address for mac in node.macaddress_set.all()])
def test_includes_nodegroup_field_for_new_node(self):
self.assertIn(
'nodegroup',
NodeWithMACAddressesForm(self.make_params()).fields)
def test_does_not_include_nodegroup_field_for_existing_node(self):
params = self.make_params()
node = factory.make_node()
self.assertNotIn(
'nodegroup',
NodeWithMACAddressesForm(params, instance=node).fields)
def test_sets_nodegroup_to_master_by_default(self):
self.assertEqual(
NodeGroup.objects.ensure_master(),
NodeWithMACAddressesForm(self.make_params()).save().nodegroup)
def test_leaves_nodegroup_alone_if_unset_on_existing_node(self):
# Selecting a node group for a node is only supported on new
# nodes. You can't change it later.
original_nodegroup = factory.make_node_group()
node = factory.make_node(nodegroup=original_nodegroup)
factory.make_node_group(network=IPNetwork("192.168.1.0/24"))
form = NodeWithMACAddressesForm(
self.make_params(nodegroup='192.168.1.0'), instance=node)
form.save()
self.assertEqual(original_nodegroup, reload_object(node).nodegroup)
def test_form_without_hostname_generates_hostname(self):
form = NodeWithMACAddressesForm(self.make_params(hostname=''))
node = form.save()
self.assertTrue(len(node.hostname) > 0)
def test_form_with_ip_based_hostname_generates_hostname(self):
ip_based_hostname = '192-168-12-10.domain'
form = NodeWithMACAddressesForm(
self.make_params(hostname=ip_based_hostname))
node = form.save()
self.assertNotEqual(ip_based_hostname, node.hostname)
class TestOptionForm(ConfigForm):
field1 = forms.CharField(label="Field 1", max_length=10)
field2 = forms.BooleanField(label="Field 2", required=False)
class TestValidOptionForm(ConfigForm):
maas_name = forms.CharField(label="Field 1", max_length=10)
class ConfigFormTest(MAASServerTestCase):
def test_form_valid_saves_into_db(self):
value = factory.getRandomString(10)
form = TestValidOptionForm({'maas_name': value})
result = form.save()
self.assertTrue(result)
self.assertEqual(value, Config.objects.get_config('maas_name'))
def test_form_rejects_unknown_settings(self):
value = factory.getRandomString(10)
value2 = factory.getRandomString(10)
form = TestOptionForm({'field1': value, 'field2': value2})
valid = form.is_valid()
self.assertFalse(valid)
self.assertIn('field1', form._errors)
self.assertIn('field2', form._errors)
def test_form_invalid_does_not_save_into_db(self):
value_too_long = factory.getRandomString(20)
form = TestOptionForm({'field1': value_too_long, 'field2': False})
result = form.save()
self.assertFalse(result)
self.assertIn('field1', form._errors)
self.assertIsNone(Config.objects.get_config('field1'))
self.assertIsNone(Config.objects.get_config('field2'))
def test_form_loads_initial_values(self):
value = factory.getRandomString()
Config.objects.set_config('field1', value)
form = TestOptionForm()
self.assertItemsEqual(['field1'], form.initial)
self.assertEqual(value, form.initial['field1'])
def test_form_loads_initial_values_from_default_value(self):
value = factory.getRandomString()
DEFAULT_CONFIG['field1'] = value
form = TestOptionForm()
self.assertItemsEqual(['field1'], form.initial)
self.assertEqual(value, form.initial['field1'])
class NodeEditForms(MAASServerTestCase):
def test_NodeForm_contains_limited_set_of_fields(self):
form = NodeForm()
self.assertEqual(
[
'hostname',
'after_commissioning_action',
'architecture',
'distro_series',
'nodegroup',
], list(form.fields))
def test_NodeForm_changes_node(self):
node = factory.make_node()
hostname = factory.getRandomString()
after_commissioning_action = factory.getRandomChoice(
NODE_AFTER_COMMISSIONING_ACTION_CHOICES)
form = NodeForm(
data={
'hostname': hostname,
'after_commissioning_action': after_commissioning_action,
'architecture': factory.getRandomChoice(ARCHITECTURE_CHOICES),
},
instance=node)
form.save()
self.assertEqual(hostname, node.hostname)
self.assertEqual(
after_commissioning_action, node.after_commissioning_action)
def test_AdminNodeForm_contains_limited_set_of_fields(self):
node = factory.make_node()
form = AdminNodeForm(instance=node)
self.assertEqual(
[
'hostname',
'after_commissioning_action',
'architecture',
'distro_series',
'power_type',
'power_parameters',
'cpu_count',
'memory',
'storage',
'zone',
],
list(form.fields))
def test_AdminNodeForm_changes_node(self):
node = factory.make_node()
zone = factory.make_zone()
hostname = factory.getRandomString()
after_commissioning_action = factory.getRandomChoice(
NODE_AFTER_COMMISSIONING_ACTION_CHOICES)
power_type = factory.getRandomChoice(POWER_TYPE_CHOICES)
form = AdminNodeForm(
data={
'hostname': hostname,
'after_commissioning_action': after_commissioning_action,
'power_type': power_type,
'architecture': factory.getRandomChoice(ARCHITECTURE_CHOICES),
'zone': zone.name,
},
instance=node)
form.save()
self.assertEqual(
(
node.hostname,
node.after_commissioning_action,
node.power_type,
node.zone,
),
(hostname, after_commissioning_action, power_type, zone))
def test_AdminNodeForm_refuses_to_update_hostname_on_allocated_node(self):
old_name = factory.make_name('old-hostname')
new_name = factory.make_name('new-hostname')
node = factory.make_node(
hostname=old_name, status=NODE_STATUS.ALLOCATED)
form = AdminNodeForm(
data={
'hostname': new_name,
'architecture': node.architecture,
},
instance=node)
self.assertFalse(form.is_valid())
self.assertEqual(
["Can't change hostname to %s: node is in use." % new_name],
form._errors['hostname'])
def test_AdminNodeForm_accepts_unchanged_hostname_on_allocated_node(self):
old_name = factory.make_name('old-hostname')
node = factory.make_node(
hostname=old_name, status=NODE_STATUS.ALLOCATED)
form = AdminNodeForm(
data={
'hostname': old_name,
'architecture': node.architecture,
},
instance=node)
self.assertTrue(form.is_valid(), form._errors)
form.save()
self.assertEqual(old_name, reload_object(node).hostname)
def test_remove_None_values_removes_None_values_in_dict(self):
random_input = factory.getRandomString()
self.assertEqual(
{random_input: random_input},
remove_None_values({
random_input: random_input,
factory.getRandomString(): None
}))
def test_remove_None_values_leaves_empty_dict_untouched(self):
self.assertEqual({}, remove_None_values({}))
def test_AdminNodeForm_changes_node_with_skip_check(self):
node = factory.make_node()
hostname = factory.getRandomString()
after_commissioning_action = factory.getRandomChoice(
NODE_AFTER_COMMISSIONING_ACTION_CHOICES)
power_type = factory.getRandomChoice(POWER_TYPE_CHOICES)
power_parameters_field = factory.getRandomString()
form = AdminNodeForm(
data={
'hostname': hostname,
'after_commissioning_action': after_commissioning_action,
'architecture': factory.getRandomChoice(ARCHITECTURE_CHOICES),
'power_type': power_type,
'power_parameters_field': power_parameters_field,
'power_parameters_skip_check': True,
},
instance=node)
form.save()
self.assertEqual(
(hostname, after_commissioning_action, power_type,
{'field': power_parameters_field}),
(node.hostname, node.after_commissioning_action, node.power_type,
node.power_parameters))
def test_AdminForm_does_not_permit_nodegroup_change(self):
# We had to make Node.nodegroup editable to get Django to
# validate it as non-blankable, but that doesn't mean that we
# actually want to allow people to edit it through API or UI.
old_nodegroup = factory.make_node_group()
node = factory.make_node(nodegroup=old_nodegroup)
new_nodegroup = factory.make_node_group()
form = AdminNodeForm(data={'nodegroup': new_nodegroup}, instance=node)
self.assertRaises(ValueError, form.save)
def test_get_node_edit_form_returns_NodeForm_if_non_admin(self):
user = factory.make_user()
self.assertEqual(NodeForm, get_node_edit_form(user))
def test_get_node_edit_form_returns_APIAdminNodeEdit_if_admin(self):
admin = factory.make_admin()
self.assertEqual(AdminNodeForm, get_node_edit_form(admin))
def test_get_node_create_form_if_non_admin(self):
user = factory.make_user()
self.assertEqual(
NodeWithMACAddressesForm, get_node_create_form(user))
def test_get_node_create_form_if_admin(self):
admin = factory.make_admin()
self.assertEqual(
AdminNodeWithMACAddressesForm, get_node_create_form(admin))
class TestNodeActionForm(MAASServerTestCase):
def test_get_action_form_creates_form_class_with_attributes(self):
user = factory.make_admin()
form_class = get_action_form(user)
self.assertEqual(user, form_class.user)
def test_get_action_form_creates_form_class(self):
user = factory.make_admin()
node = factory.make_node(status=NODE_STATUS.DECLARED)
form = get_action_form(user)(node)
self.assertIsInstance(form, NodeActionForm)
self.assertEqual(node, form.node)
def test_get_action_form_for_admin(self):
admin = factory.make_admin()
node = factory.make_node(status=NODE_STATUS.DECLARED)
node.use_traditional_installer()
form = get_action_form(admin)(node)
self.assertItemsEqual(
[Commission.name, Delete.name, UseCurtin.name],
form.actions)
def test_get_action_form_for_user(self):
user = factory.make_user()
node = factory.make_node(status=NODE_STATUS.DECLARED)
form = get_action_form(user)(node)
self.assertIsInstance(form, NodeActionForm)
self.assertEqual(node, form.node)
self.assertItemsEqual({}, form.actions)
def test_save_performs_requested_action(self):
admin = factory.make_admin()
node = factory.make_node(status=NODE_STATUS.DECLARED)
form = get_action_form(admin)(
node, {NodeActionForm.input_name: Commission.name})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(NODE_STATUS.COMMISSIONING, node.status)
def test_rejects_disallowed_action(self):
user = factory.make_user()
node = factory.make_node(status=NODE_STATUS.DECLARED)
form = get_action_form(user)(
node, {NodeActionForm.input_name: Commission.name})
self.assertFalse(form.is_valid())
self.assertEquals(
{'action': ['Not a permitted action: %s.' % Commission.name]},
form._errors)
def test_rejects_unknown_action(self):
user = factory.make_user()
node = factory.make_node(status=NODE_STATUS.DECLARED)
action = factory.getRandomString()
form = get_action_form(user)(
node, {NodeActionForm.input_name: action})
self.assertFalse(form.is_valid())
self.assertIn(
"is not one of the available choices.", form._errors['action'][0])
class TestUniqueEmailForms(MAASServerTestCase):
def assertFormFailsValidationBecauseEmailNotUnique(self, form):
self.assertFalse(form.is_valid())
self.assertIn('email', form._errors)
self.assertEquals(1, len(form._errors['email']))
# Cope with 'Email' and 'E-mail' in error message.
self.assertThat(
form._errors['email'][0],
MatchesRegex(
r'User with this E-{0,1}mail address already exists.'))
def test_ProfileForm_fails_validation_if_email_taken(self):
another_email = '%[email protected]' % factory.getRandomString()
factory.make_user(email=another_email)
email = '%[email protected]' % factory.getRandomString()
user = factory.make_user(email=email)
form = ProfileForm(instance=user, data={'email': another_email})
self.assertFormFailsValidationBecauseEmailNotUnique(form)
def test_ProfileForm_validates_if_email_unchanged(self):
email = '%[email protected]' % factory.getRandomString()
user = factory.make_user(email=email)
form = ProfileForm(instance=user, data={'email': email})
self.assertTrue(form.is_valid())
def test_NewUserCreationForm_fails_validation_if_email_taken(self):
email = '%[email protected]' % factory.getRandomString()
username = factory.getRandomString()
password = factory.getRandomString()
factory.make_user(email=email)
form = NewUserCreationForm(
{
'email': email,
'username': username,
'password1': password,
'password2': password,
})
self.assertFormFailsValidationBecauseEmailNotUnique(form)
def test_EditUserForm_fails_validation_if_email_taken(self):
another_email = '%[email protected]' % factory.getRandomString()
factory.make_user(email=another_email)
email = '%[email protected]' % factory.getRandomString()
user = factory.make_user(email=email)
form = EditUserForm(instance=user, data={'email': another_email})
self.assertFormFailsValidationBecauseEmailNotUnique(form)
def test_EditUserForm_validates_if_email_unchanged(self):
email = '%[email protected]' % factory.getRandomString()
user = factory.make_user(email=email)
form = EditUserForm(
instance=user,
data={
'email': email,
'username': factory.getRandomString(),
})
self.assertTrue(form.is_valid())
class TestNewUserCreationForm(MAASServerTestCase):
def test_saves_to_db_by_default(self):
password = factory.make_name('password')
params = {
'email': '%[email protected]' % factory.getRandomString(),
'username': factory.make_name('user'),
'password1': password,
'password2': password,
}
form = NewUserCreationForm(params)
form.save()
self.assertIsNotNone(User.objects.get(username=params['username']))
def test_does_not_save_to_db_if_commit_is_False(self):
password = factory.make_name('password')
params = {
'email': '%[email protected]' % factory.getRandomString(),
'username': factory.make_name('user'),
'password1': password,
'password2': password,
}
form = NewUserCreationForm(params)
form.save(commit=False)
self.assertItemsEqual(
[], User.objects.filter(username=params['username']))
def test_fields_order(self):
form = NewUserCreationForm()
self.assertEqual(
['username', 'last_name', 'email', 'password1', 'password2',
'is_superuser'],
list(form.fields))
class TestMACAddressForm(MAASServerTestCase):
def test_MACAddressForm_creates_mac_address(self):
node = factory.make_node()
mac = factory.getRandomMACAddress()
form = MACAddressForm(node=node, data={'mac_address': mac})
form.save()
self.assertTrue(
MACAddress.objects.filter(node=node, mac_address=mac).exists())
def test_saves_to_db_by_default(self):
node = factory.make_node()
mac = factory.getRandomMACAddress()
form = MACAddressForm(node=node, data={'mac_address': mac})
form.save()
self.assertEqual(
mac, MACAddress.objects.get(mac_address=mac).mac_address)
def test_does_not_save_to_db_if_commit_is_False(self):
node = factory.make_node()
mac = factory.getRandomMACAddress()
form = MACAddressForm(node=node, data={'mac_address': mac})
form.save(commit=False)
self.assertItemsEqual([], MACAddress.objects.filter(mac_address=mac))
def test_MACAddressForm_displays_error_message_if_mac_already_used(self):
mac = factory.getRandomMACAddress()
        factory.make_mac_address(address=mac)
        node = factory.make_node()
form = MACAddressForm(node=node, data={'mac_address': mac})
self.assertFalse(form.is_valid())
self.assertEquals(
{'mac_address': ['This MAC address is already registered.']},
form._errors)
self.assertFalse(
MACAddress.objects.filter(node=node, mac_address=mac).exists())
def make_interface_settings():
"""Create a dict of arbitrary interface configuration parameters."""
network = factory.getRandomNetwork()
return {
'ip': factory.getRandomIPInNetwork(network),
'interface': factory.make_name('interface'),
'subnet_mask': unicode(network.netmask),
'broadcast_ip': unicode(network.broadcast),
'router_ip': factory.getRandomIPInNetwork(network),
'ip_range_low': factory.getRandomIPInNetwork(network),
'ip_range_high': factory.getRandomIPInNetwork(network),
'management': factory.getRandomEnum(NODEGROUPINTERFACE_MANAGEMENT),
}
nullable_fields = [
'subnet_mask', 'broadcast_ip', 'router_ip', 'ip_range_low',
'ip_range_high']
class TestNodeGroupInterfaceForm(MAASServerTestCase):
def test_NodeGroupInterfaceForm_validates_parameters(self):
form = NodeGroupInterfaceForm(data={'ip': factory.getRandomString()})
self.assertFalse(form.is_valid())
self.assertEquals(
{'ip': ['Enter a valid IPv4 or IPv6 address.']}, form._errors)
def test_NodeGroupInterfaceForm_can_save_fields_being_None(self):
settings = make_interface_settings()
settings['management'] = NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED
for field_name in nullable_fields:
del settings[field_name]
nodegroup = factory.make_node_group()
form = NodeGroupInterfaceForm(
data=settings, instance=NodeGroupInterface(nodegroup=nodegroup))
interface = form.save()
field_values = [
getattr(interface, field_name) for field_name in nullable_fields]
self.assertThat(field_values, AllMatch(Equals('')))
class TestNodeGroupWithInterfacesForm(MAASServerTestCase):
def test_creates_pending_nodegroup(self):
name = factory.make_name('name')
uuid = factory.getRandomUUID()
form = NodeGroupWithInterfacesForm(
data={'name': name, 'uuid': uuid})
self.assertTrue(form.is_valid(), form._errors)
nodegroup = form.save()
self.assertEqual(
(uuid, name, NODEGROUP_STATUS.PENDING, 0),
(
nodegroup.uuid,
nodegroup.name,
nodegroup.status,
nodegroup.nodegroupinterface_set.count(),
))
def test_creates_nodegroup_with_status(self):
name = factory.make_name('name')
uuid = factory.getRandomUUID()
form = NodeGroupWithInterfacesForm(
status=NODEGROUP_STATUS.ACCEPTED,
data={'name': name, 'uuid': uuid})
self.assertTrue(form.is_valid(), form._errors)
nodegroup = form.save()
self.assertEqual(NODEGROUP_STATUS.ACCEPTED, nodegroup.status)
def test_validates_parameters(self):
name = factory.make_name('name')
too_long_uuid = 'test' * 30
form = NodeGroupWithInterfacesForm(
data={'name': name, 'uuid': too_long_uuid})
self.assertFalse(form.is_valid())
self.assertEquals(
{'uuid':
['Ensure this value has at most 36 characters (it has 120).']},
form._errors)
def test_rejects_invalid_json_interfaces(self):
name = factory.make_name('name')
uuid = factory.getRandomUUID()
invalid_interfaces = factory.make_name('invalid_json_interfaces')
form = NodeGroupWithInterfacesForm(
data={
'name': name, 'uuid': uuid, 'interfaces': invalid_interfaces})
self.assertFalse(form.is_valid())
self.assertEquals(
{'interfaces': ['Invalid json value.']},
form._errors)
def test_rejects_invalid_list_interfaces(self):
name = factory.make_name('name')
uuid = factory.getRandomUUID()
invalid_interfaces = json.dumps('invalid interface list')
form = NodeGroupWithInterfacesForm(
data={
'name': name, 'uuid': uuid, 'interfaces': invalid_interfaces})
self.assertFalse(form.is_valid())
self.assertEquals(
{'interfaces': [INTERFACES_VALIDATION_ERROR_MESSAGE]},
form._errors)
def test_rejects_invalid_interface(self):
name = factory.make_name('name')
uuid = factory.getRandomUUID()
interface = make_interface_settings()
# Make the interface invalid.
interface['ip_range_high'] = 'invalid IP address'
interfaces = json.dumps([interface])
form = NodeGroupWithInterfacesForm(
data={'name': name, 'uuid': uuid, 'interfaces': interfaces})
self.assertFalse(form.is_valid())
self.assertIn(
"Enter a valid IPv4 or IPv6 address",
form._errors['interfaces'][0])
def test_creates_interface_from_params(self):
name = factory.make_name('name')
uuid = factory.getRandomUUID()
interface = make_interface_settings()
interfaces = json.dumps([interface])
form = NodeGroupWithInterfacesForm(
data={'name': name, 'uuid': uuid, 'interfaces': interfaces})
self.assertTrue(form.is_valid(), form._errors)
form.save()
nodegroup = NodeGroup.objects.get(uuid=uuid)
self.assertThat(
nodegroup.nodegroupinterface_set.all()[0],
MatchesStructure.byEquality(**interface))
def test_checks_presence_of_other_managed_interfaces(self):
name = factory.make_name('name')
uuid = factory.getRandomUUID()
interfaces = []
for index in range(2):
interface = make_interface_settings()
interface['management'] = factory.getRandomEnum(
NODEGROUPINTERFACE_MANAGEMENT,
but_not=(NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED, ))
interfaces.append(interface)
interfaces = json.dumps(interfaces)
form = NodeGroupWithInterfacesForm(
data={'name': name, 'uuid': uuid, 'interfaces': interfaces})
self.assertFalse(form.is_valid())
self.assertIn(
"Only one managed interface can be configured for this cluster",
form._errors['interfaces'][0])
def test_creates_multiple_interfaces(self):
name = factory.make_name('name')
uuid = factory.getRandomUUID()
interface1 = make_interface_settings()
# Only one interface at most can be 'managed'.
interface2 = make_interface_settings()
interface2['management'] = NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED
interfaces = json.dumps([interface1, interface2])
form = NodeGroupWithInterfacesForm(
data={'name': name, 'uuid': uuid, 'interfaces': interfaces})
self.assertTrue(form.is_valid(), form._errors)
form.save()
nodegroup = NodeGroup.objects.get(uuid=uuid)
self.assertEqual(2, nodegroup.nodegroupinterface_set.count())
def test_populates_cluster_name_default(self):
name = factory.make_name('name')
uuid = factory.getRandomUUID()
form = NodeGroupWithInterfacesForm(
status=NODEGROUP_STATUS.ACCEPTED,
data={'name': name, 'uuid': uuid})
self.assertTrue(form.is_valid(), form._errors)
nodegroup = form.save()
self.assertIn(uuid, nodegroup.cluster_name)
def test_populates_cluster_name(self):
cluster_name = factory.make_name('cluster_name')
uuid = factory.getRandomUUID()
form = NodeGroupWithInterfacesForm(
status=NODEGROUP_STATUS.ACCEPTED,
data={'cluster_name': cluster_name, 'uuid': uuid})
self.assertTrue(form.is_valid(), form._errors)
nodegroup = form.save()
self.assertEqual(cluster_name, nodegroup.cluster_name)
def test_creates_unmanaged_interfaces(self):
name = factory.make_name('name')
uuid = factory.getRandomUUID()
interface = make_interface_settings()
del interface['management']
interfaces = json.dumps([interface])
form = NodeGroupWithInterfacesForm(
data={'name': name, 'uuid': uuid, 'interfaces': interfaces})
self.assertTrue(form.is_valid(), form._errors)
form.save()
uuid_nodegroup = NodeGroup.objects.get(uuid=uuid)
self.assertEqual(
[NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED],
[
nodegroup.management for nodegroup in
uuid_nodegroup.nodegroupinterface_set.all()
])
class TestNodeGroupEdit(MAASServerTestCase):
def make_form_data(self, nodegroup):
"""Create `NodeGroupEdit` form data based on `nodegroup`."""
return {
'name': nodegroup.name,
'cluster_name': nodegroup.cluster_name,
'status': nodegroup.status,
}
def test_changes_name(self):
nodegroup = factory.make_node_group(name=factory.make_name('old-name'))
new_name = factory.make_name('new-name')
data = self.make_form_data(nodegroup)
data['name'] = new_name
form = NodeGroupEdit(instance=nodegroup, data=data)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(new_name, reload_object(nodegroup).name)
def test_refuses_name_change_if_dns_managed_and_nodes_in_use(self):
nodegroup, node = factory.make_unrenamable_nodegroup_with_node()
data = self.make_form_data(nodegroup)
data['name'] = factory.make_name('new-name')
form = NodeGroupEdit(instance=nodegroup, data=data)
self.assertFalse(form.is_valid())
def test_accepts_unchanged_name(self):
nodegroup, node = factory.make_unrenamable_nodegroup_with_node()
original_name = nodegroup.name
form = NodeGroupEdit(
instance=nodegroup, data=self.make_form_data(nodegroup))
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(original_name, reload_object(nodegroup).name)
def test_accepts_omitted_name(self):
nodegroup, node = factory.make_unrenamable_nodegroup_with_node()
original_name = nodegroup.name
data = self.make_form_data(nodegroup)
del data['name']
form = NodeGroupEdit(instance=nodegroup, data=data)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(original_name, reload_object(nodegroup).name)
def test_accepts_name_change_if_nodegroup_not_accepted(self):
nodegroup, node = factory.make_unrenamable_nodegroup_with_node()
nodegroup.status = NODEGROUP_STATUS.PENDING
data = self.make_form_data(nodegroup)
data['name'] = factory.make_name('new-name')
form = NodeGroupEdit(instance=nodegroup, data=data)
self.assertTrue(form.is_valid())
def test_accepts_name_change_if_dns_managed_but_no_nodes_in_use(self):
nodegroup, node = factory.make_unrenamable_nodegroup_with_node()
node.status = NODE_STATUS.READY
node.save()
data = self.make_form_data(nodegroup)
data['name'] = factory.make_name('new-name')
form = NodeGroupEdit(instance=nodegroup, data=data)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(data['name'], reload_object(nodegroup).name)
def test_accepts_name_change_if_nodes_in_use_but_dns_not_managed(self):
nodegroup, node = factory.make_unrenamable_nodegroup_with_node()
interface = nodegroup.get_managed_interface()
interface.management = NODEGROUPINTERFACE_MANAGEMENT.DHCP
interface.save()
data = self.make_form_data(nodegroup)
data['name'] = factory.make_name('new-name')
form = NodeGroupEdit(instance=nodegroup, data=data)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(data['name'], reload_object(nodegroup).name)
def test_accepts_name_change_if_nodegroup_has_no_interface(self):
nodegroup, node = factory.make_unrenamable_nodegroup_with_node()
NodeGroupInterface.objects.filter(nodegroup=nodegroup).delete()
data = self.make_form_data(nodegroup)
data['name'] = factory.make_name('new-name')
form = NodeGroupEdit(instance=nodegroup, data=data)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(data['name'], reload_object(nodegroup).name)
class TestCommissioningScriptForm(MAASServerTestCase):
def test_creates_commissioning_script(self):
content = factory.getRandomString().encode('ascii')
name = factory.make_name('filename')
uploaded_file = SimpleUploadedFile(content=content, name=name)
form = CommissioningScriptForm(files={'content': uploaded_file})
self.assertTrue(form.is_valid(), form._errors)
form.save()
new_script = CommissioningScript.objects.get(name=name)
self.assertThat(
new_script,
MatchesStructure.byEquality(name=name, content=content))
def test_raises_if_duplicated_name(self):
content = factory.getRandomString().encode('ascii')
name = factory.make_name('filename')
factory.make_commissioning_script(name=name)
uploaded_file = SimpleUploadedFile(content=content, name=name)
form = CommissioningScriptForm(files={'content': uploaded_file})
self.assertEqual(
(False, {'content': ["A script with that name already exists."]}),
(form.is_valid(), form._errors))
def test_rejects_whitespace_in_name(self):
name = factory.make_name('with space')
content = factory.getRandomString().encode('ascii')
uploaded_file = SimpleUploadedFile(content=content, name=name)
form = CommissioningScriptForm(files={'content': uploaded_file})
self.assertFalse(form.is_valid())
self.assertEqual(
["Name contains disallowed characters (e.g. space or quotes)."],
form._errors['content'])
def test_rejects_quotes_in_name(self):
name = factory.make_name("l'horreur")
content = factory.getRandomString().encode('ascii')
uploaded_file = SimpleUploadedFile(content=content, name=name)
form = CommissioningScriptForm(files={'content': uploaded_file})
self.assertFalse(form.is_valid())
self.assertEqual(
["Name contains disallowed characters (e.g. space or quotes)."],
form._errors['content'])
class TestUnconstrainedMultipleChoiceField(MAASServerTestCase):
def test_accepts_list(self):
value = ['a', 'b']
instance = UnconstrainedMultipleChoiceField()
self.assertEqual(value, instance.clean(value))
class TestValidatorMultipleChoiceField(MAASServerTestCase):
def test_field_validates_valid_data(self):
value = ['[email protected]', '[email protected]']
field = ValidatorMultipleChoiceField(validator=validate_email)
self.assertEqual(value, field.clean(value))
def test_field_uses_validator(self):
value = ['[email protected]', 'invalid-email']
field = ValidatorMultipleChoiceField(validator=validate_email)
error = self.assertRaises(ValidationError, field.clean, value)
self.assertEquals(['Enter a valid email address.'], error.messages)
class TestBulkNodeActionForm(MAASServerTestCase):
def test_performs_action(self):
node1 = factory.make_node()
node2 = factory.make_node()
node3 = factory.make_node()
system_id_to_delete = [node1.system_id, node2.system_id]
form = BulkNodeActionForm(
user=factory.make_admin(),
data=dict(
action=Delete.name,
system_id=system_id_to_delete))
self.assertTrue(form.is_valid(), form._errors)
done, not_actionable, not_permitted = form.save()
existing_nodes = list(Node.objects.filter(
system_id__in=system_id_to_delete))
node3_system_id = reload_object(node3).system_id
self.assertEqual(
[2, 0, 0],
[done, not_actionable, not_permitted])
self.assertEqual(
[[], node3.system_id],
[existing_nodes, node3_system_id])
def test_first_action_is_empty(self):
form = BulkNodeActionForm(user=factory.make_admin())
action = form.fields['action']
default_action = action.choices[0][0]
required = action.required
# The default action is the empty string (i.e. no action)
# and it's a required field.
self.assertEqual(('', True), (default_action, required))
def test_gives_stat_when_not_applicable(self):
node1 = factory.make_node(status=NODE_STATUS.DECLARED)
node2 = factory.make_node(status=NODE_STATUS.FAILED_TESTS)
system_id_for_action = [node1.system_id, node2.system_id]
form = BulkNodeActionForm(
user=factory.make_admin(),
data=dict(
action=StartNode.name,
system_id=system_id_for_action))
self.assertTrue(form.is_valid(), form._errors)
done, not_actionable, not_permitted = form.save()
self.assertEqual(
[0, 2, 0],
[done, not_actionable, not_permitted])
def test_gives_stat_when_no_permission(self):
user = factory.make_user()
node = factory.make_node(
status=NODE_STATUS.ALLOCATED, owner=factory.make_user())
system_id_for_action = [node.system_id]
form = BulkNodeActionForm(
user=user,
data=dict(
action=StopNode.name,
system_id=system_id_for_action))
self.assertTrue(form.is_valid(), form._errors)
done, not_actionable, not_permitted = form.save()
self.assertEqual(
[0, 0, 1],
[done, not_actionable, not_permitted])
def test_gives_stat_when_action_is_inhibited(self):
node = factory.make_node(
status=NODE_STATUS.ALLOCATED, owner=factory.make_user())
form = BulkNodeActionForm(
user=factory.make_admin(),
data=dict(
action=Delete.name,
system_id=[node.system_id]))
self.assertTrue(form.is_valid(), form._errors)
done, not_actionable, not_permitted = form.save()
self.assertEqual(
[0, 1, 0],
[done, not_actionable, not_permitted])
def test_rejects_empty_system_ids(self):
form = BulkNodeActionForm(
user=factory.make_admin(),
data=dict(action=Delete.name, system_id=[]))
self.assertFalse(form.is_valid(), form._errors)
self.assertEqual(
["No node selected."],
form._errors['system_id'])
def test_rejects_invalid_system_ids(self):
node = factory.make_node()
system_id_to_delete = [node.system_id, "wrong-system_id"]
form = BulkNodeActionForm(
user=factory.make_admin(),
data=dict(
action=Delete.name,
system_id=system_id_to_delete))
self.assertFalse(form.is_valid(), form._errors)
self.assertEqual(
["Some of the given system ids are invalid system ids."],
form._errors['system_id'])
def test_rejects_if_no_action(self):
form = BulkNodeActionForm(
user=factory.make_admin(),
data=dict(system_id=[factory.make_node().system_id]))
self.assertFalse(form.is_valid(), form._errors)
def test_rejects_if_invalid_action(self):
form = BulkNodeActionForm(
user=factory.make_admin(),
data=dict(
action="invalid-action",
system_id=[factory.make_node().system_id]))
self.assertFalse(form.is_valid(), form._errors)
class TestDownloadProgressForm(MAASServerTestCase):
def test_updates_instance(self):
progress = factory.make_download_progress_incomplete(size=None)
new_bytes_downloaded = progress.bytes_downloaded + 1
size = progress.bytes_downloaded + 2
error = factory.getRandomString()
form = DownloadProgressForm(
data={
'size': size,
'bytes_downloaded': new_bytes_downloaded,
'error': error,
},
instance=progress)
new_progress = form.save()
progress = reload_object(progress)
self.assertEqual(progress, new_progress)
self.assertEqual(size, progress.size)
self.assertEqual(new_bytes_downloaded, progress.bytes_downloaded)
self.assertEqual(error, progress.error)
def test_rejects_unknown_ongoing_download(self):
form = DownloadProgressForm(
data={'bytes_downloaded': 1}, instance=None)
self.assertFalse(form.is_valid())
def test_get_download_returns_ongoing_download(self):
progress = factory.make_download_progress_incomplete()
self.assertEqual(
progress,
DownloadProgressForm.get_download(
progress.nodegroup, progress.filename,
progress.bytes_downloaded + 1))
def test_get_download_recognises_start_of_new_download(self):
nodegroup = factory.make_node_group()
filename = factory.getRandomString()
progress = DownloadProgressForm.get_download(nodegroup, filename, None)
self.assertIsNotNone(progress)
self.assertEqual(nodegroup, progress.nodegroup)
self.assertEqual(filename, progress.filename)
self.assertIsNone(progress.bytes_downloaded)
def test_get_download_returns_none_for_unknown_ongoing_download(self):
self.assertIsNone(
DownloadProgressForm.get_download(
factory.make_node_group(), factory.getRandomString(), 1))
| agpl-3.0 | -9,010,222,038,021,368,000 | 37.893546 | 79 | 0.629353 | false | 3.974985 | true | false | false |
karlind/ewu-v4 | accounts/migrations/0012_auto_20160714_1522.py | 1 | 1153 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-14 07:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0011_auto_20160714_1521'),
]
operations = [
migrations.AlterField(
model_name='account',
name='college',
field=models.CharField(blank=True, default='', max_length=30),
preserve_default=False,
),
migrations.AlterField(
model_name='account',
name='entry_year',
field=models.CharField(blank=True, default='', max_length=4),
preserve_default=False,
),
migrations.AlterField(
model_name='account',
name='phone',
field=models.CharField(blank=True, default='', max_length=11),
preserve_default=False,
),
migrations.AlterField(
model_name='account',
name='qq',
field=models.CharField(blank=True, default='', max_length=20),
preserve_default=False,
),
]
| gpl-3.0 | 209,679,865,083,511,330 | 28.564103 | 74 | 0.556808 | false | 4.302239 | false | false | false |
Jianlong-Peng/rp | python/scale.py | 1 | 6507 | '''
#=============================================================================
# FileName: scale.py
# Desc:
# Author: jlpeng
# Email: [email protected]
# HomePage:
# Created: 2013-09-23 20:43:19
# LastChange: 2014-04-09 10:49:21
# History:
#=============================================================================
'''
import sys
from getopt import getopt
from copy import deepcopy
def generate_parameter(infile):
'''
parameter
=========
infile: string, generated by `calcDescriptor.py`
return
======
para: dict
{atom_type:[[min...],[max...]], ...}
'''
descriptors = {} #{atom_type:[[values...],[values...],...], ...}
inf = open(infile,"r")
line = inf.readline()
while line != "":
line = inf.readline()
while line.startswith("\t"):
line = line.strip().split(":")
atom_type = line[1]
values = map(float,line[2].split(","))
if not descriptors.has_key(atom_type):
descriptors[atom_type] = []
descriptors[atom_type].append(values)
line = inf.readline()
inf.close()
para = {} #{atom_type:[[min...],[max...]],...}
for atom_type in descriptors.iterkeys():
para[atom_type] = [deepcopy(descriptors[atom_type][0]),deepcopy(descriptors[atom_type][0])]
for i in xrange(1,len(descriptors[atom_type])):
for j in xrange(len(descriptors[atom_type][i])):
if descriptors[atom_type][i][j] < para[atom_type][0][j]:
para[atom_type][0][j] = descriptors[atom_type][i][j]
if descriptors[atom_type][i][j] > para[atom_type][1][j]:
para[atom_type][1][j] = descriptors[atom_type][i][j]
return para
def save_parameter(para,outfile):
'''
parameter
=========
para: dict, {atom_type:[[min...],[max...]], ...}
outfile: string, where to save parameters
parameters will be saved as follows:
atom_type
\\tmin max
\\t...
atom_type
\\tmin max
\\t...
...
'''
outf = open(outfile,"w")
for key in para.iterkeys():
print >>outf, key
for i in xrange(len(para[key][0])):
print >>outf, "\t%.16g %.16g"%(para[key][0][i],para[key][1][i])
outf.close()
def read_parameter(infile):
'''
to read scaling parameters from `infile`
'''
para = {} # {atom_type:[[min...],[max...]],...}
inf = open(infile,"r")
line = inf.readline()
while line != "":
atom_type = line.strip()
if para.has_key(atom_type):
print >>sys.stderr, "Error: more than one set of scalling parameters found for atom type",atom_type
inf.close()
sys.exit(1)
para[atom_type] = [[],[]]
line = inf.readline()
while line.startswith("\t"):
line = line.split()
para[atom_type][0].append(float(line[0]))
para[atom_type][1].append(float(line[1]))
line = inf.readline()
inf.close()
return para
def scale(orig_value, min_, max_):
if min_ == max_:
#return orig_value
return 0.
else:
return 1.*(orig_value-min_)/(max_-min_)
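# Worked example (illustrative): scale(2.5, 0.0, 10.0) returns 0.25, i.e. min-max
# scaling into [0,1]; degenerate columns (min_ == max_) are mapped to 0.0.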
def runScale(para, infile, outfile, verbose):
'''
to scale `infile` according to para, and scaled
values will be saved in `outfile`
'''
inf = open(infile,"r")
outf = open(outfile,"w")
line = inf.readline()
while line != "":
outf.write(line)
name = line.split()[0]
line = inf.readline()
while line.startswith("\t"):
line = line.strip().split(":")
if not para.has_key(line[1]):
print >>sys.stderr,"Error: Can't find scalling parameters for atom type",line[1]
inf.close()
outf.close()
sys.exit(1)
min_max = para[line[1]]
orig_values = line[2].split(",")
if len(min_max[0]) != len(orig_values):
print >>sys.stderr, "Error: different number of descriptors found for atom type",line[1]
print >>sys.stderr, " suppose to be %d, but found %d"%(len(min_max[0]),len(orig_values))
inf.close()
outf.close()
sys.exit(1)
scaled_value = scale(float(orig_values[0]),min_max[0][0],min_max[1][0])
if verbose and (scaled_value<=-0.5 or scaled_value>=1.5):
print "Warning:",name,line[0],line[1],"1",scaled_value
outf.write("\t%s:%s:%.6g"%(line[0],line[1],scaled_value))
for i in xrange(1,len(orig_values)):
scaled_value = scale(float(orig_values[i]),min_max[0][i],min_max[1][i])
if verbose and (scaled_value<=-0.5 or scaled_value>=1.5):
print "Warning:",name,line[0],line[1],i+1,scaled_value
outf.write(",%.6g"%scaled_value)
outf.write("\n")
line = inf.readline()
inf.close()
outf.close()
def main(argv=sys.argv):
if len(argv)!=5 and len(argv)!=6:
print "\nUsage:"
print " %s [options] infile outfile"%argv[0]
print "\nOptions:"
print " -s save_filename: save scaling parameters"
print " -r restore_filename: restore scaling parameters"
print " --verbose: if given, display those with scaled value <=-0.5 or >=1.5"
print "\nAttention:"
print " . if `-s` is given, `infile` will be scalled to (-1,1),"
print " and parameters will be saved in `save_filename`"
print " . if `-r` is given, scaling `infile` using `restore_filename` instead."
print ""
sys.exit(1)
options,args = getopt(argv[1:],"s:r:",["verbose"])
if len(args) != 2:
print "Error: invalid number of arguments"
sys.exit(1)
save_file = None
load_file = None
verbose = False
for opt,value in options:
if opt == "-s":
save_file = value
elif opt == "-r":
load_file = value
elif opt == "--verbose":
verbose = True
else:
print "Error: invalid option ",opt
sys.exit(1)
if save_file is not None:
para = generate_parameter(args[0])
save_parameter(para,save_file)
if load_file is not None:
para = read_parameter(load_file)
runScale(para,args[0],args[1],verbose)
if __name__ == '__main__':
    main()
| gpl-2.0 | 2,984,936,988,208,512,000 | 33.428571 | 111 | 0.510988 | false | 3.649467 | false | false | false |
pmaigutyak/mp-shop | attributes/forms.py | 1 | 4450 |
from django import forms
from django.utils.translation import ugettext
from django.core.exceptions import ValidationError
from django.utils.functional import cached_property
from apps.products.models import Product
from attributes.models import (
Attribute,
AttributeValue,
AttributeOption)
class FilterForm(forms.Form):
def __init__(self, category, *args, **kwargs):
self._attributes = Attribute\
.objects\
.visible()\
.for_filter()\
.for_categories([category])
super().__init__(*args, **kwargs)
for attr in self._attributes:
self.fields[attr.full_slug] = forms.MultipleChoiceField(
widget=forms.CheckboxSelectMultiple,
label=attr.name,
required=False)
def set_options(self, entries):
if not entries:
self.fields = {}
return
choices = {attr.id: [] for attr in self._attributes}
attr_values = AttributeValue.objects.filter(
attr__in=self._attributes,
entry__in=entries
).values_list('id', flat=True)
options = AttributeOption\
.objects\
.filter(attr_values__in=attr_values)\
.order_by('name')\
.distinct()
for option in options:
choices[option.attr_id].append((option.id, option, ))
for attr in self._attributes:
if choices[attr.id]:
self.fields[attr.full_slug].choices = choices[attr.id]
else:
del self.fields[attr.full_slug]
def get_value_ids(self):
ids = []
for attr in self._attributes:
ids += self.data.getlist(attr.full_slug)
return ids
def _get_available_options(self):
added_options = []
options = {attr.pk: [] for attr in self._attributes}
attr_values = AttributeValue.objects.filter(
attribute__in=self._attributes,
entry__in=self._entries
).select_related('value_option')
for value in attr_values:
option = value.value_option
if option not in added_options:
added_options.append(option)
options[value.attribute_id].append(option)
return options
class AttributesForm(forms.ModelForm):
def __init__(
self,
data=None,
files=None,
instance=None,
initial=None,
**kwargs):
if instance and instance.pk:
initial = self._get_initial_data(instance)
super().__init__(
data=data,
files=files,
instance=instance,
initial=initial,
**kwargs)
for attr in self._attributes:
fields = attr.build_form_fields()
self.fields.update(fields)
def _get_initial_data(self, instance):
initial = {}
values = {
v.attr.full_slug: v.get_value()
for v in AttributeValue.objects.filter(
attr__in=self._attributes,
entry=instance
)
}
for attr in self._attributes:
initial[attr.full_slug] = values.get(attr.full_slug)
return initial
def clean(self):
data = self.cleaned_data
for attr in self._attributes:
if attr.has_options:
new_option = data.get(attr.get_option_form_field_name())
if new_option:
option, c = attr.options.get_or_create(name=new_option)
data[attr.full_slug] = option
if not data.get(attr.full_slug) and attr.is_required:
raise ValidationError({
attr.full_slug: ugettext('{} is required').format(
attr.name)
})
return data
def commit(self, instance):
for attr in Attribute.objects.for_categories([instance.category]):
if attr.full_slug in self.cleaned_data:
value = self.cleaned_data[attr.full_slug]
attr.save_value(instance, value)
return instance
@cached_property
def _attributes(self):
return list(Attribute.objects.all())
class Media:
js = ('attrs/form.js', )
class Meta:
model = Product
fields = ['id']
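# Usage sketch (illustrative only; the request/queryset wiring below is assumed and
# not part of this module):
#   form = FilterForm(category, request.GET)
#   form.set_options(products_in_category)
#   selected_value_ids = form.get_value_ids()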
| isc | 2,767,729,096,030,983,700 | 24.428571 | 75 | 0.542697 | false | 4.48137 | false | false | false |
drejc/piRadio | test/button_test.py | 1 | 1190 | # Simple script to test read button state (pressed / not pressed) for TFT buttons
# Buttons are located on pins 12, 16 and 18
# BCM (Broadcom SOC channel) numbers are 18, 23 and 24
import os
import sys
import RPi.GPIO as GPIO
# import needed modules
lib_path = os.path.abspath(os.path.join('..'))
sys.path.append(lib_path)
from gpio.PushButtonTracker import PushButton
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# buttonUp = 18
# buttonMiddle = 23
# buttonDown = 24
#
# GPIO.setup(buttonUp, GPIO.IN, GPIO.PUD_UP)
# GPIO.setup(buttonMiddle, GPIO.IN, GPIO.PUD_UP)
# GPIO.setup(buttonDown, GPIO.IN, GPIO.PUD_UP)
upBtn = PushButton("Up", 18, GPIO.PUD_UP)
downBtn = PushButton("Down", 24, GPIO.PUD_UP)
middleBtn = PushButton("Middle", 23, GPIO.PUD_UP)
buttons = [upBtn, middleBtn, downBtn]
try:
while True:
if downBtn.doubleClicked():
print "EXIT"
GPIO.cleanup()
sys.exit(0)
for button in buttons:
if button.clicked():
print button.name
if button.doubleClicked():
print "Double click " + button.name
except KeyboardInterrupt: # trap a CTRL+C keyboard interrupt
GPIO.cleanup() # resets all GPIO ports used by this program | gpl-3.0 | 4,584,571,388,344,891,400 | 21.471698 | 81 | 0.70084 | false | 3.035714 | false | false | false |
fugitifduck/exabgp | lib/exabgp/netlink/message.py | 1 | 3747 | # encoding: utf-8
"""
interface.py
Created by Thomas Mangin on 2015-03-31.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
import os
import socket
from struct import pack
from struct import unpack
from struct import calcsize
from collections import namedtuple
from exabgp.netlink import NetLinkError
from exabgp.netlink.sequence import Sequence
from exabgp.netlink.attributes import Attributes
try:
getattr(socket,'AF_NETLINK')
except AttributeError:
raise ImportError('This module only works on unix version with netlink support')
class NetLinkMessage (object):
_IGNORE_SEQ_FAULTS = True
NETLINK_ROUTE = 0
format = namedtuple('Message','type flags seq pid data')
pid = os.getpid()
netlink = socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, NETLINK_ROUTE)
class Header (object):
# linux/netlink.h
PACK = 'IHHII'
LEN = calcsize(PACK)
class Command (object):
NLMSG_NOOP = 0x01
NLMSG_ERROR = 0x02
NLMSG_DONE = 0x03
NLMSG_OVERRUN = 0x04
class Flags (object):
NLM_F_REQUEST = 0x01 # It is query message.
NLM_F_MULTI = 0x02 # Multipart message, terminated by NLMSG_DONE
NLM_F_ACK = 0x04 # Reply with ack, with zero or error code
NLM_F_ECHO = 0x08 # Echo this query
# Modifiers to GET query
NLM_F_ROOT = 0x100 # specify tree root
NLM_F_MATCH = 0x200 # return all matching
NLM_F_DUMP = NLM_F_ROOT | NLM_F_MATCH
NLM_F_ATOMIC = 0x400 # atomic GET
# Modifiers to NEW query
NLM_F_REPLACE = 0x100 # Override existing
NLM_F_EXCL = 0x200 # Do not touch, if it exists
NLM_F_CREATE = 0x400 # Create, if it does not exist
NLM_F_APPEND = 0x800 # Add to end of list
errors = {
Command.NLMSG_ERROR: 'netlink error',
Command.NLMSG_OVERRUN: 'netlink overrun',
}
@classmethod
def encode (cls, dtype, seq, flags, body, attributes):
attrs = Attributes.encode(attributes)
length = cls.Header.LEN + len(attrs) + len(body)
return pack(cls.Header.PACK, length, dtype, flags, seq, cls.pid) + body + attrs
@classmethod
def decode (cls, data):
while data:
length, ntype, flags, seq, pid = unpack(cls.Header.PACK,data[:cls.Header.LEN])
if len(data) < length:
raise NetLinkError("Buffer underrun")
yield cls.format(ntype, flags, seq, pid, data[cls.Header.LEN:length])
data = data[length:]
@classmethod
def send (cls, dtype, hflags, family=socket.AF_UNSPEC):
sequence = Sequence()
message = cls.encode(
dtype,
sequence,
hflags,
pack('Bxxx', family),
{}
)
cls.netlink.send(message)
while True:
data = cls.netlink.recv(640000)
for mtype, flags, seq, pid, data in cls.decode(data):
if seq != sequence:
if cls._IGNORE_SEQ_FAULTS:
continue
raise NetLinkError("netlink seq mismatch")
if mtype == NetLinkMessage.Command.NLMSG_DONE:
raise StopIteration()
				elif mtype in cls.errors:
raise NetLinkError(cls.errors[mtype])
else:
yield data
# def change (self, dtype, family=socket.AF_UNSPEC):
# for _ in self.send(dtype, self.Flags.NLM_F_REQUEST | self.Flags.NLM_F_CREATE,family):
# yield _
class InfoMessage (object):
# to be defined by the subclasses
format = namedtuple('Parent', 'to be subclassed')
# to be defined by the subclasses
class Header (object):
PACK = ''
LEN = 0
@classmethod
def decode (cls, data):
extracted = list(unpack(cls.Header.PACK,data[:cls.Header.LEN]))
attributes = Attributes.decode(data[cls.Header.LEN:])
extracted.append(dict(attributes))
return cls.format(*extracted)
@classmethod
def extract (cls, atype, flags=NetLinkMessage.Flags.NLM_F_REQUEST | NetLinkMessage.Flags.NLM_F_DUMP, family=socket.AF_UNSPEC):
for data in NetLinkMessage.send(atype,flags,family):
yield cls.decode(data)
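# Usage sketch (hypothetical subclass; the names, header layout and message type below
# are illustrative assumptions, not part of exabgp's API):
#   class Link (InfoMessage):
#       format = namedtuple('Link', 'family type index flags change attributes')
#       class Header (object):
#           PACK = 'BxHiII'
#           LEN = calcsize(PACK)
#   for link in Link.extract(18):  # assuming 18 == RTM_GETLINK
#       print link.attributes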
| bsd-3-clause | 3,252,601,582,341,618,700 | 26.350365 | 127 | 0.697625 | false | 2.95738 | false | false | false |
dairin0d/transform-utils | space_view3d_transform_utils/dairin0d/utils_ui.py | 5 | 22675 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
import blf
from mathutils import Color, Vector, Matrix, Quaternion, Euler
from .bpy_inspect import BlRna
from .utils_python import DummyObject
from .utils_gl import cgl
#============================================================================#
# Note: making a similar wrapper for Operator.report is impossible,
# since Blender only shows the report from the currently executing operator.
# ===== MESSAGEBOX ===== #
if not hasattr(bpy.types, "WM_OT_messagebox"):
class WM_OT_messagebox(bpy.types.Operator):
bl_idname = "wm.messagebox"
# "Attention!" is quite generic caption that suits
# most of the situations when "OK" button is desirable.
# bl_label isn't really changeable at runtime
# (changing it causes some memory errors)
bl_label = "Attention!"
# We can't pass arguments through normal means,
# since in this case a "Reset" button would appear
args = {}
# If we don't define execute(), there would be
# an additional label "*Redo unsupported*"
def execute(self, context):
return {'FINISHED'}
def invoke(self, context, event):
text = self.args.get("text", "")
self.icon = self.args.get("icon", 'NONE')
if (not text) and (self.icon == 'NONE'):
return {'CANCELLED'}
border_w = 8*2
icon_w = (0 if (self.icon == 'NONE') else 16)
w_incr = border_w + icon_w
width = self.args.get("width", 300) - border_w
self.lines = []
max_x = cgl.text.split_text(width, icon_w, 0, text, self.lines, font=0)
width = max_x + border_w
self.spacing = self.args.get("spacing", 0.5)
self.spacing = max(self.spacing, 0.0)
wm = context.window_manager
confirm = self.args.get("confirm", False)
if confirm:
return wm.invoke_props_dialog(self, width)
else:
return wm.invoke_popup(self, width)
def draw(self, context):
layout = self.layout
col = layout.column()
col.scale_y = 0.5 * (1.0 + self.spacing * 0.5)
icon = self.icon
for line in self.lines:
if icon != 'NONE': line = " "+line
col.label(text=line, icon=icon)
icon = 'NONE'
bpy.utils.register_class(WM_OT_messagebox) # REGISTER
def messagebox(text, icon='NONE', width=300, confirm=False, spacing=0.5):
"""
Displays a message box with the given text and icon.
text -- the messagebox's text
icon -- the icon (displayed at the start of the text)
Defaults to 'NONE' (no icon).
width -- the messagebox's max width
Defaults to 300 pixels.
confirm -- whether to display "OK" button (this is purely
cosmetical, as the message box is non-blocking).
Defaults to False.
spacing -- relative distance between the lines
Defaults to 0.5.
"""
WM_OT_messagebox = bpy.types.WM_OT_messagebox
WM_OT_messagebox.args["text"] = text
WM_OT_messagebox.args["icon"] = icon
WM_OT_messagebox.args["width"] = width
WM_OT_messagebox.args["spacing"] = spacing
WM_OT_messagebox.args["confirm"] = confirm
bpy.ops.wm.messagebox('INVOKE_DEFAULT')
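# Usage sketch (illustrative; the text and icon values are arbitrary examples):
#   messagebox("Could not export the selected objects.", icon='ERROR', confirm=True)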
#============================================================================#
# Note:
# if item is a property group instance and item["pi"] = 3.14,
# in UI it should be displayed like this: layout.prop(item, '["pi"]')
# ===== NESTED LAYOUT ===== #
class NestedLayout:
"""
Utility for writing more structured UI drawing code.
Attention: layout properties are propagated to sublayouts!
Example:
def draw(self, context):
layout = NestedLayout(self.layout, self.bl_idname)
exit_layout = True
# You can use both the standard way:
sublayout = layout.split()
sublayout.label("label A")
sublayout.label("label B")
# And the structured way:
with layout:
layout.label("label 1")
if exit_layout: layout.exit()
layout.label("label 2") # won't be executed
with layout.row(True)["main"]:
layout.label("label 3")
with layout.row(True)(enabled=False):
layout.label("label 4")
if exit_layout: layout.exit("main")
layout.label("label 5") # won't be executed
layout.label("label 6") # won't be executed
with layout.fold("Foldable micro-panel", "box"):
if layout.folded: layout.exit()
layout.label("label 7")
with layout.fold("Foldable 2"):
layout.label("label 8") # not drawn if folded
"""
_sub_names = {"row", "column", "column_flow", "box", "split", "menu_pie"}
_default_attrs = dict(
active = True,
alert = False,
alignment = 'EXPAND',
enabled = True,
operator_context = 'INVOKE_DEFAULT',
scale_x = 1.0,
scale_y = 1.0,
)
def __new__(cls, layout, idname="", parent=None):
"""
Wrap the layout in a NestedLayout.
To avoid interference with other panels' foldable
containers, supply panel's bl_idname as the idname.
"""
if isinstance(layout, cls) and (layout._idname == idname): return layout
self = object.__new__(cls)
self._idname = idname
self._parent = parent
self._layout = layout
self._stack = [self]
self._attrs = dict(self._default_attrs)
self._tag = None
# propagate settings to sublayouts
if parent: self(**parent._stack[-1]._attrs)
return self
def __getattr__(self, name):
layout = self._stack[-1]._layout
if not layout:
# This is the dummy layout; imitate normal layout
# behavior without actually drawing anything.
if name in self._sub_names:
return (lambda *args, **kwargs: NestedLayout(None, self._idname, self))
else:
return self._attrs.get(name, self._dummy_callable)
if name in self._sub_names:
func = getattr(layout, name)
return (lambda *args, **kwargs: NestedLayout(func(*args, **kwargs), self._idname, self))
else:
return getattr(layout, name)
def __setattr__(self, name, value):
if name.startswith("_"):
self.__dict__[name] = value
else:
wrapper = self._stack[-1]
wrapper._attrs[name] = value
if wrapper._layout: setattr(wrapper._layout, name, value)
def __call__(self, **kwargs):
"""Batch-set layout attributes."""
wrapper = self._stack[-1]
wrapper._attrs.update(kwargs)
layout = wrapper._layout
if layout:
for k, v in kwargs.items():
setattr(layout, k, v)
return self
@staticmethod
def _dummy_callable(*args, **kwargs):
return NestedLayout._dummy_obj
_dummy_obj = DummyObject()
# ===== FOLD (currently very hacky) ===== #
# Each foldable micropanel needs to store its fold-status
# as a Bool property (in order to be clickable in the UI)
# somewhere where it would be saved with .blend, but won't
# be affected by most of the other things (i.e., in Screen).
# At first I thought to implement such storage with
# nested dictionaries, but currently layout.prop() does
# not recognize ID-property dictionaries as a valid input.
class FoldPG(bpy.types.PropertyGroup):
def update(self, context):
pass # just indicates that the widget needs to be force-updated
value = bpy.props.BoolProperty(description="Fold/unfold", update=update, name="")
bpy.utils.register_class(FoldPG) # REGISTER
# make up some name that's unlikely to be used by normal addons
folds_keyname = "dairin0d_ui_utils_NestedLayout_ui_folds"
setattr(bpy.types.Screen, folds_keyname, bpy.props.CollectionProperty(type=FoldPG)) # REGISTER
folded = False # stores folded status from the latest fold() call
def fold(self, text, container=None, folded=False, key=None):
"""
Create a foldable container.
text -- the container's title/label
container -- a sequence (type_of_container, arg1, ..., argN)
where type_of_container is one of {"row", "column",
"column_flow", "box", "split"}; arg1..argN are the
arguments of the corresponding container function.
If you supply just the type_of_container, it would be
interpreted as (type_of_container,).
folded -- whether the container should be folded by default.
Default value is False.
key -- the container's unique identifier within the panel.
If not specified, the container's title will be used
in its place.
"""
data_path = "%s:%s" % (self._idname, key or text)
folds = getattr(bpy.context.screen, self.folds_keyname)
try:
this_fold = folds[data_path]
except KeyError:
this_fold = folds.add()
this_fold.name = data_path
this_fold.value = folded
is_fold = this_fold.value
icon = ('DOWNARROW_HLT' if not is_fold else 'RIGHTARROW')
# make the necessary container...
if not container:
container_args = ()
container = "column"
elif isinstance(container, str):
container_args = ()
else:
container_args = container[1:]
container = container[0]
res = getattr(self, container)(*container_args)
with res.row(True)(alignment='LEFT'):
res.prop(this_fold, "value", text=text, icon=icon, emboss=False, toggle=True)
# make fold-status accessible to the calling code
self.__dict__["folded"] = is_fold
# If folded, return dummy layout
if is_fold: return NestedLayout(None, self._idname, self)
return res
# ===== BUTTON (currently very hacky) ===== #
_button_registrator = None
def button(self, callback, *args, tooltip=None, **kwargs):
"""Draw a dynamic button. Callback and tooltip are expected to be stable."""
registrator = self._button_registrator
op_idname = (registrator.get(callback, tooltip) if registrator else None)
if not op_idname: op_idname = "wm.dynamic_button_dummy"
return self.operator(op_idname, *args, **kwargs)
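    # Usage sketch (illustrative; `self.on_click(context, event, arg)` is an assumed
    # callback defined by the calling panel, not by this class):
    #   layout.button(self.on_click, text="Click me", tooltip="Runs the panel callback")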
# ===== NESTED CONTEXT MANAGEMENT ===== #
class ExitSublayout(Exception):
def __init__(self, tag=None):
self.tag = tag
@classmethod
def exit(cls, tag=None):
"""Jump out of current (or marked with the given tag) layout's context."""
raise cls.ExitSublayout(tag)
def __getitem__(self, tag):
"""Mark this layout with the tag"""
self._tag = tag
return self
def __enter__(self):
# Only nested (context-managed) layouts are stored in stack
parent = self._parent
if parent: parent._stack.append(self)
def __exit__(self, type, value, traceback):
# Only nested (context-managed) layouts are stored in stack
parent = self._parent
if parent: parent._stack.pop()
if type == self.ExitSublayout:
# Is this the layout the exit() was requested for?
# Yes: suppress the exception. No: let it propagate to the parent.
return (value.tag is None) or (value.tag == self._tag)
if not hasattr(bpy.types, "WM_OT_dynamic_button_dummy"):
class WM_OT_dynamic_button_dummy(bpy.types.Operator):
bl_idname = "wm.dynamic_button_dummy"
bl_label = " "
bl_description = ""
bl_options = {'INTERNAL'}
arg = bpy.props.StringProperty()
def execute(self, context):
return {'CANCELLED'}
def invoke(self, context, event):
return {'CANCELLED'}
bpy.utils.register_class(WM_OT_dynamic_button_dummy)
class DynamicButton:
def __init__(self, id):
self.age = 0
self.id = id
def register(self, btn_info):
data_path, callback, tooltip = btn_info
if not callback:
def execute(self, context):
return {'CANCELLED'}
def invoke(self, context, event):
return {'CANCELLED'}
elif data_path:
full_path_resolve = BlRna.full_path_resolve
def execute(self, context):
_self = full_path_resolve(data_path)
return ({'CANCELLED'} if callback(_self, context, None, self.arg) is False else {'FINISHED'})
def invoke(self, context, event):
_self = full_path_resolve(data_path)
return ({'CANCELLED'} if callback(_self, context, event, self.arg) is False else {'FINISHED'})
else:
def execute(self, context):
return ({'CANCELLED'} if callback(context, None, self.arg) is False else {'FINISHED'})
def invoke(self, context, event):
return ({'CANCELLED'} if callback(context, event, self.arg) is False else {'FINISHED'})
self.op_idname = "wm.dynamic_button_%s" % self.id
self.op_class = type("WM_OT_dynamic_button_%s" % self.id, (bpy.types.Operator,), dict(
bl_idname = self.op_idname,
bl_label = "",
bl_description = tooltip,
bl_options = {'INTERNAL'},
arg = bpy.props.StringProperty(),
execute = execute,
invoke = invoke,
))
bpy.utils.register_class(self.op_class)
def unregister(self):
bpy.utils.unregister_class(self.op_class)
class ButtonRegistrator:
max_age = 2
def __init__(self):
self.update_counter = 0
self.layout_counter = 0
self.free_ids = []
self.to_register = set()
self.to_unregister = set()
self.registered = {}
def register_button(self, btn_info):
if self.free_ids:
btn_id = self.free_ids.pop()
else:
btn_id = len(self.registered)
btn = DynamicButton(btn_id)
btn.register(btn_info)
self.registered[btn_info] = btn
def unregister_button(self, btn_info):
btn = self.registered.pop(btn_info)
self.free_ids.append(btn.id)
btn.unregister()
def update(self):
if self.to_unregister:
for btn_info in self.to_unregister:
self.unregister_button(btn_info)
self.to_unregister.clear()
if self.to_register:
for btn_info in self.to_register:
self.register_button(btn_info)
self.to_register.clear()
self.update_counter += 1
def increment_age(self):
for btn_info, btn in self.registered.items():
btn.age += 1
if btn.age > self.max_age:
self.to_unregister.add(btn_info)
def get(self, callback, tooltip):
if self.layout_counter != self.update_counter:
self.layout_counter = self.update_counter
self.increment_age()
if not callback:
if not tooltip: return None
btn_info = (None, None, tooltip)
else:
if tooltip is None: tooltip = (callback.__doc__ or "") # __doc__ can be None
callback_self = getattr(callback, "__self__", None)
if isinstance(callback_self, bpy.types.PropertyGroup):
# we cannot keep reference to this object, only the data path
full_path = BlRna.full_path(callback_self)
btn_info = (full_path, callback.__func__, tooltip)
else:
btn_info = (None, callback, tooltip)
btn = self.registered.get(btn_info)
if btn:
btn.age = 0
return btn.op_idname
self.to_register.add(btn_info)
#============================================================================#
# TODO: put all these into BlUI class?
def tag_redraw(arg=None):
"""A utility function to tag redraw of arbitrary UI units."""
if arg is None:
arg = bpy.context.window_manager
elif isinstance(arg, bpy.types.Window):
arg = arg.screen
if isinstance(arg, bpy.types.Screen):
for area in arg.areas:
area.tag_redraw()
elif isinstance(arg, bpy.types.WindowManager):
for window in arg.windows:
for area in window.screen.areas:
area.tag_redraw()
else: # Region, Area, RenderEngine
arg.tag_redraw()
def calc_region_rect(area, r, overlap=True):
# Note: there may be more than one region of the same type (e.g. in quadview)
if (not overlap) and (r.type == 'WINDOW'):
x0, y0, x1, y1 = r.x, r.y, r.x+r.width, r.y+r.height
ox0, oy0, ox1, oy1 = x0, y0, x1, y1
for r in area.regions:
if r.type == 'TOOLS':
ox0 = r.x + r.width
elif r.type == 'UI':
ox1 = r.x
x0, y0, x1, y1 = max(x0, ox0), max(y0, oy0), min(x1, ox1), min(y1, oy1)
return (Vector((x0, y0)), Vector((x1-x0, y1-y0)))
else:
return (Vector((r.x, r.y)), Vector((r.width, r.height)))
def point_in_rect(p, r):
return ((p[0] >= r.x) and (p[0] < r.x + r.width) and (p[1] >= r.y) and (p[1] < r.y + r.height))
def rv3d_from_region(area, region):
if (area.type != 'VIEW_3D') or (region.type != 'WINDOW'): return None
space_data = area.spaces.active
try:
quadviews = space_data.region_quadviews
except AttributeError:
quadviews = None # old API
if not quadviews: return space_data.region_3d
x_id = 0
y_id = 0
for r in area.regions:
if (r.type == 'WINDOW') and (r != region):
if r.x < region.x: x_id = 1
if r.y < region.y: y_id = 1
# 0: bottom left (Front Ortho)
# 1: top left (Top Ortho)
# 2: bottom right (Right Ortho)
# 3: top right (User Persp)
return quadviews[y_id | (x_id << 1)]
# areas can't overlap, but regions can
def ui_contexts_under_coord(x, y, window=None):
point = int(x), int(y)
if not window: window = bpy.context.window
screen = window.screen
scene = screen.scene
tool_settings = scene.tool_settings
for area in screen.areas:
if point_in_rect(point, area):
space_data = area.spaces.active
for region in area.regions:
if point_in_rect(point, region):
yield dict(window=window, screen=screen,
area=area, space_data=space_data, region=region,
region_data=rv3d_from_region(area, region),
scene=scene, tool_settings=tool_settings)
break
def ui_context_under_coord(x, y, index=0, window=None):
ui_context = None
for i, ui_context in enumerate(ui_contexts_under_coord(x, y, window)):
if i == index: return ui_context
return ui_context
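# Usage sketch (illustrative): find the UI context under the mouse and use it as an
# operator override (`event` is assumed to come from the caller):
#   ctx = ui_context_under_coord(event.mouse_x, event.mouse_y)
#   if ctx and ctx["area"].type == 'VIEW_3D': bpy.ops.view3d.view_selected(ctx)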
def find_ui_area(area_type, region_type='WINDOW', window=None):
if not window: window = bpy.context.window
screen = window.screen
scene = screen.scene
tool_settings = scene.tool_settings
for area in screen.areas:
if area.type == area_type:
space_data = area.spaces.active
region = None
for _region in area.regions:
if _region.type == region_type: region = _region
return dict(window=window, screen=screen,
area=area, space_data=space_data, region=region,
region_data=rv3d_from_region(area, region),
scene=scene, tool_settings=tool_settings)
def ui_hierarchy(ui_obj):
if isinstance(ui_obj, bpy.types.Window):
return (ui_obj, None, None)
elif isinstance(ui_obj, bpy.types.Area):
wm = bpy.context.window_manager
for window in wm.windows:
for area in window.screen.areas:
if area == ui_obj: return (window, area, None)
elif isinstance(ui_obj, bpy.types.Region):
wm = bpy.context.window_manager
for window in wm.windows:
for area in window.screen.areas:
for region in area.regions:
if region == ui_obj: return (window, area, region)
# TODO: relative coords?
def convert_ui_coord(area, region, xy, src, dst, vector=True):
x, y = xy
if src == dst:
pass
elif src == 'WINDOW':
if dst == 'AREA':
x -= area.x
y -= area.y
elif dst == 'REGION':
x -= region.x
y -= region.y
elif src == 'AREA':
if dst == 'WINDOW':
x += area.x
y += area.y
elif dst == 'REGION':
x += area.x - region.x
y += area.y - region.y
elif src == 'REGION':
if dst == 'WINDOW':
x += region.x
y += region.y
elif dst == 'AREA':
x += region.x - area.x
y += region.y - area.y
return (Vector((x, y)) if vector else (int(x), int(y)))
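# Usage sketch (illustrative): convert window-space mouse coordinates to region space
# (`event`, `area` and `region` are assumed to come from the caller):
#   rx, ry = convert_ui_coord(area, region, (event.mouse_x, event.mouse_y),
#                             'WINDOW', 'REGION', vector=False)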
#============================================================================#
| gpl-3.0 | 6,859,048,113,327,889,000 | 35.690939 | 110 | 0.557089 | false | 3.954482 | false | false | false |
RossLote/cloudplayer | music/migrations/0001_initial.py | 1 | 4646 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('cloudplayer', '0002_file_play_count'),
]
operations = [
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Artist',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Composer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Genre',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Playlist',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PlaylistTrack',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.PositiveSmallIntegerField(default=0)),
('playlist', models.ForeignKey(to='music.Playlist')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Track',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255)),
('rating', models.FloatField(default=0.0)),
('track_number', models.PositiveSmallIntegerField(default=0)),
('of_tracks', models.PositiveSmallIntegerField(default=0)),
('disk_number', models.PositiveSmallIntegerField(default=0)),
('of_disks', models.PositiveSmallIntegerField(default=0)),
('date_added', models.DateTimeField(auto_now_add=True)),
('last_played', models.DateTimeField(default=None, null=True)),
('play_count', models.PositiveIntegerField(default=0)),
('album', models.ForeignKey(default=None, to='music.Album', null=True)),
('artist', models.ForeignKey(default=None, to='music.Artist', null=True)),
('composer', models.ForeignKey(default=None, to='music.Composer', null=True)),
('file', models.ForeignKey(to='cloudplayer.File')),
('genre', models.ForeignKey(default=None, to='music.Genre', null=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='playlisttrack',
name='track',
field=models.ForeignKey(to='music.Track'),
preserve_default=True,
),
migrations.AddField(
model_name='playlist',
name='tracks',
field=models.ManyToManyField(to='music.Track', through='music.PlaylistTrack'),
preserve_default=True,
),
migrations.AddField(
model_name='playlist',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
| mit | 7,446,969,337,703,704,000 | 38.042017 | 114 | 0.525183 | false | 4.716751 | false | false | false |
bkfunk/Quilltery | app/__init__.py | 1 | 2879 | from flask import Flask
from flask.ext.bootstrap import Bootstrap
from flask.ext.sqlalchemy import SQLAlchemy
import os
from flask.ext.login import LoginManager
from flask.ext.openid import OpenID
from flask.ext.admin import Admin
from config import basedir, ADMINS, MAIL_SERVER, MAIL_PORT, MAIL_USERNAME, MAIL_PASSWORD
from flask_misaka import Misaka
from app.renderers import QuilltRenderer
app = Flask(__name__)
app.config.from_object('config')
Bootstrap(app)
db = SQLAlchemy(app)
m = Misaka(app, QuilltRenderer())
print(m.render("This is a test (r) [[test]]"))
#lm = LoginManager()
#lm.init_app(app)
#lm.login_view = 'login'
#oid = OpenID(app, os.path.join(basedir, 'tmp'))
if not app.debug:
import logging
from logging.handlers import SMTPHandler
credentials = None
if MAIL_USERNAME or MAIL_PASSWORD:
credentials = (MAIL_USERNAME, MAIL_PASSWORD)
mail_handler = SMTPHandler((MAIL_SERVER, MAIL_PORT),
'no-reply@' + MAIL_SERVER, ADMINS,
'microblog failure', credentials)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
if not app.debug:
import logging
from logging.handlers import RotatingFileHandler
file_handler = RotatingFileHandler('tmp/microblog.log',
'a', 1 * 1024 * 1024, 10)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
app.logger.setLevel(logging.INFO)
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.info('microblog startup')
from app import views, models
admin = Admin(app, name = "AppAdmin")
#admin.add_view(views.AdminView(models.Post, db.session))
# Testing
if not models.User.query.all():
u = models.User(nickname='john', email='[email protected]', role=models.ROLE_USER)
db.session.add(u)
db.session.commit()
if not models.Quillt.query.all():
q1 = models.Quillt(title='Quillt 1', user_id = models.User.query.first().id)
q2 = models.Quillt(title='Quillt 2', user_id = models.User.query.first().id)
db.session.add(q1)
db.session.add(q2)
db.session.commit()
text = """# The Beginning #
It began in the beginning. In the _beginnning_. It was the beginning when it began.
I suppose I'll start where it starts, at the end. The end is always where it starts of course. Even the end starts at the end. The question is, where does the end _end_?
He told me, ``Hello, son.''
``What?'' I said. I was pretty dumb about these things.
[[Go to the store]]
[[Leave town]]
[[also]]
[A custom link](thisisacustomlink.com "TITLE CUSTOM")
"""
if not models.Passage.query.all():
p1 = models.Passage(title='A great passage',
body=text, quillt_id = models.Quillt.query.get(1).id)
db.session.add(p1)
db.session.commit()
| gpl-2.0 | -3,894,619,358,932,330,500 | 30.988889 | 169 | 0.688086 | false | 3.275313 | false | true | false |
bjss/BJSS_liveobs_automation | liveobs_ui/selectors/mobile/list.py | 2 | 1120 | """
Selectors for List Component
The list component is used in the mobile to show the list of tasks and the list
of patients
"""
from selenium.webdriver.common.by import By
LIST_CONTAINER = (By.CLASS_NAME, 'tasklist')
LIST_ITEM = (By.CSS_SELECTOR, '.tasklist li a')
UNKNOWN_CLINICAL_RISK_LIST_ITEM = \
(By.CSS_SELECTOR, '.tasklist li a.level-not-set')
LOW_CLINICAL_RISK_LIST_ITEM = (By.CSS_SELECTOR, '.tasklist li a.level-one')
MEDIUM_CLINICAL_RISK_LIST_ITEM = (By.CSS_SELECTOR, '.tasklist li a.level-two')
HIGH_CLINICAL_RISK_LIST_ITEM = (By.CSS_SELECTOR, '.tasklist li a.level-three')
STATUS_LIST_ITEM = (By.CSS_SELECTOR, '.tasklist li.status-alert')
STATUS_LIST_ITEM_FLAG = \
(By.CSS_SELECTOR, '.tasklist li.status-alert .status-flag')
LIST_ITEM_DATA_ROW = (By.CSS_SELECTOR, '.tasklist li a .task-meta')
LIST_ITEM_DATA_LEFT = (By.CSS_SELECTOR, '.tasklist li a .task-meta .task-left')
LIST_ITEM_DATA_RIGHT = \
(By.CSS_SELECTOR, '.tasklist li a .task-meta .task-right')
LIST_ITEM_DATA_INFO = (By.CSS_SELECTOR, '.task-meta .taskInfo')
LIST_ITEM_DATA_NAME = (By.CSS_SELECTOR, '.task-meta .task-left strong')
| gpl-3.0 | 3,052,244,055,259,454,000 | 45.666667 | 79 | 0.713393 | false | 2.8 | false | false | false |
anchor/make-magic | tools/showtask.py | 1 | 2514 | #! /usr/bin/env python
'''monitor a task in realtime
This should use the HTTP interface. It currently doesn't
'''
title = "make-magic stat"
from gi.repository import Gtk as gtk
from gi.repository import Gdk as gdk
from gi.repository import GObject
import sys
import traceback
import lib.magic
# Deal with keyboard interrupts gracefully
def exc_handler(exc, val, tb):
if exc != KeyboardInterrupt:
traceback.print_exception(exc, val, tb)
sys.exit()
else:
gtk.main_quit()
sys.excepthook = exc_handler
class ShowStuff(object):
def __init__(self, stuff, update_interval=1000):
self.window = window = gtk.Window()
window.set_title(title)
window.connect('destroy', lambda win: gtk.main_quit())
window.set_border_width(5)
window.resize(500,300)
sw = gtk.ScrolledWindow()
window.add(sw)
vp = gtk.Viewport()
sw.add(vp)
vbox = self.vbox = gtk.VBox(spacing=0)
vp.add(vbox)
#window.add(vbox)
self.labels = {}
self.frames = {}
self.ebox = {}
for k,v,col in stuff:
eb = gtk.EventBox()
f = gtk.Frame(label=k)
l = gtk.Label(v)
l.set_alignment(0,.5)
#l.set_justification(gtk.Justification.LEFT)
f.add(l)
eb.add(f)
self.labels[k] = l
self.frames[k] = f
self.ebox[k] = eb
vbox.pack_start(eb, True, True, 0)
if col: self.set_color(k,col)
window.show_all()
GObject.timeout_add(update_interval, self.update_stuff)
def set_color(self, k, col):
self.ebox[k].modify_bg(gtk.StateType.NORMAL, gdk.color_parse(col))
def update_stuff(self):
print "update timer"
return True
class MonitorTask(ShowStuff):
def __init__(self, uuid, interval=1000):
self.magic = lib.magic.Magic()
self.uuid = uuid
ShowStuff.__init__(self, self.get_task_tuples(), interval)
self.window.set_title("make-magic task: "+uuid)
def get_task_tuples(self):
rtor = self.magic.ready_to_run(self.uuid)
rtor = ", ".join(item['name'] for item in rtor)
yield(("Ready to run", rtor, "lightblue"))
task = self.magic.get_task(self.uuid)
for item in task['items']:
color = "green" if item['state'] == 'COMPLETE' else None
item.pop('depends',None)
desc = ', '.join(str(k)+": "+str(v) for k,v in item.items())
yield (item['name'], desc, color)
def update_stuff(self):
for k,v,col in self.get_task_tuples():
if col:
self.set_color(k,col)
if self.labels[k].get_text() != v:
self.labels[k].set_text(v)
return True
def monitor_task(uuid):
mt = MonitorTask(uuid,100)
gtk.main()
if __name__ == "__main__":
monitor_task(sys.argv[1])
| bsd-3-clause | -2,601,557,388,117,879,000 | 24.393939 | 68 | 0.665076 | false | 2.694534 | false | false | false |
mrrudy/esp2homie2openhab | tools/mqttConvert2retained.py | 1 | 1215 | import paho.mqtt.client as mqtt
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
# client.subscribe("DEBUG/pythonscript")
client.subscribe("homie/5ccf7f2c12d1/desiredTemp/degrees/set")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
if msg.retain==False:
print("addint retain to: "+msg.topic+" "+str(msg.payload))
client.unsubscribe(msg.topic)
client.publish(msg.topic, payload=msg.payload, qos=0, retain=True)
client.subscribe(msg.topic)
else:
print("message already has retain flag")
return;
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("192.168.1.25", 1883, 60)
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
client.loop_forever()
| gpl-3.0 | -624,525,163,891,914,400 | 35.818182 | 79 | 0.725926 | false | 3.626866 | false | false | false |
aplanas/hackweek11 | repo-log.py | 1 | 1704 | #! /usr/bin/env python
import argparse
import csv
import git
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Extract git history information.')
parser.add_argument('-f', '--from', dest='from_', help='from revno')
parser.add_argument('-t', '--to', help='to revno')
parser.add_argument('-l', '--limit', help='max number of commits')
parser.add_argument('-p', '--project', help='project directory')
parser.add_argument('-r', '--git-repository', dest='project', help='project directory')
parser.add_argument('-c', '--csv', help='csv file name')
args = parser.parse_args()
if not args.csv or not args.project:
parser.print_help()
exit(1)
if not args.to and not args.limit:
parser.print_help()
exit(1)
with open(args.csv, 'w') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',', quotechar='"', doublequote=True)
repo = git.Repo(args.project)
if args.to:
args.to = repo.commit(args.to).hexsha
if args.limit:
iter_ = repo.iter_commits(args.from_, max_count=args.limit, no_merges=True)
else:
iter_ = repo.iter_commits(args.from_, no_merges=True)
for commit in iter_:
if commit.hexsha == args.to:
break
summary = commit.summary.encode('utf-8')
message = commit.message.encode('utf-8')
stats = commit.stats.total
csvwriter.writerow((summary, message, commit.hexsha,
stats['files'], stats['lines'],
stats['insertions'],
stats['deletions']))
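# Usage sketch (illustrative command line; the paths and numbers are assumptions):
#   python repo-log.py -r /path/to/repo -f HEAD -l 200 -c history.csv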
| mit | 4,325,897,972,822,850,000 | 33.77551 | 91 | 0.565141 | false | 3.908257 | false | false | false |