repo_name
stringlengths 6
61
| path
stringlengths 4
230
| copies
stringlengths 1
3
| size
stringlengths 4
6
| text
stringlengths 1.01k
850k
| license
stringclasses 15
values | hash
int64 -9,220,477,234,079,998,000
9,219,060,020B
| line_mean
float64 11.6
96.6
| line_max
int64 32
939
| alpha_frac
float64 0.26
0.9
| autogenerated
bool 1
class | ratio
float64 1.62
6.1
| config_test
bool 2
classes | has_no_keywords
bool 2
classes | few_assignments
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
BasioMeusPuga/pyGames | Snake.py | 1 | 4991 | #!/usr/bin/python3
import sys
import random
import pygame
import os.path
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
FPS = 60
pygame.init()
displaysurface = pygame.display.set_mode((400, 400))
displaysurface.fill(WHITE)
pygame.display.set_caption('Ghatiya snake')
clock = pygame.time.Clock()
my_dir = os.path.dirname(os.path.realpath(__file__))
food_sound = pygame.mixer.Sound(my_dir + '/resources/ping.wav')
oooh_sound = pygame.mixer.Sound(my_dir + '/resources/oooh.wav')
class GameState:
score = 0
speed_multiplier = 1
paused = False
game_over = False
keylock = False
class Food:
def __init__(self):
self.foodx = 105
self.foody = 105
def new_food(self):
self.foodx = random.randrange(15, 386, 10)
self.foody = random.randrange(15, 386, 10)
class Snake:
def __init__(self):
self.segments = [[15, 5], [5, 5]]
self.direction = 'RIGHT'
def direction_update(self, direction_input):
if self.direction == 'RIGHT' and direction_input == 'LEFT' or \
self.direction == 'LEFT' and direction_input == 'RIGHT' or \
self.direction == 'DOWN' and direction_input == 'UP' or \
self.direction == 'UP' and direction_input == 'DOWN':
pass
else:
if GameState.keylock is False:
self.direction = direction_input
GameState.keylock = True
def embiggen(self):
self.segments.append(self.segments[-1])
snek = Snake()
chow = Food()
def draw_me_like_one_of_your_french_girls():
displaysurface.fill(WHITE)
# Score
font = pygame.font.SysFont('calibri', 20, bold=True)
text = font.render(str(GameState.score), True, (0, 128, 0))
x_c = len(str(GameState.score))
text_rect = text.get_rect(center=(385 - x_c, 10))
displaysurface.blit(text, text_rect)
# Food
pygame.draw.rect(displaysurface, GREEN, [chow.foodx, chow.foody, 10, 10])
# Snake
for i in snek.segments:
pygame.draw.rect(displaysurface, BLACK, [i[0], i[1], 10, 10])
def snake_update():
if snek.direction == 'UP':
x_mult = 0
y_mult = -1
if snek.direction == 'DOWN':
x_mult = 0
y_mult = 1
if snek.direction == 'LEFT':
x_mult = -1
y_mult = 0
if snek.direction == 'RIGHT':
x_mult = 1
y_mult = 0
new_snek = [[snek.segments[0][0] + (10 * x_mult), snek.segments[0][1] + (10 * y_mult)]]
GameState.keylock = False
for i in snek.segments[:-1]:
new_snek.append(i)
snek.segments = new_snek
# Collisions
snake_head = snek.segments[0]
if snake_head in snek.segments[1:] or \
(snake_head[0] < 5 or snake_head[0] > 395) or \
(snake_head[1] < 5 or snake_head[1] > 395):
oooh_sound.play()
GameState.game_over = True
def make_it_grow():
snek.embiggen()
chow.new_food()
GameState.score += 1
GameState.speed_multiplier = int(GameState.score / 5)
food_sound.play()
if snake_head == [chow.foodx, chow.foody]:
make_it_grow()
draw_me_like_one_of_your_french_girls()
def main():
while True:
if GameState.game_over is False:
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP:
snek.direction_update('UP')
elif event.key == pygame.K_DOWN:
snek.direction_update('DOWN')
elif event.key == pygame.K_LEFT:
snek.direction_update('LEFT')
elif event.key == pygame.K_RIGHT:
snek.direction_update('RIGHT')
elif event.key == pygame.K_RETURN:
# Toggle Pause
if GameState.paused is True:
GameState.paused = False
elif GameState.paused is False:
GameState.paused = True
elif event.key == pygame.K_ESCAPE or event.key == pygame.K_q:
pygame.quit()
sys.exit()
elif event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if GameState.paused is False:
snake_update()
pygame.time.wait((100 - 10 * GameState.speed_multiplier))
else:
displaysurface.fill(WHITE)
font = pygame.font.SysFont('calibri', 60, bold=True)
text = font.render('PAUSED', True, RED)
text_rect = text.get_rect(center=(200, 200))
displaysurface.blit(text, text_rect)
elif GameState.game_over is True:
# Draw the head in red on getting a game over
snake_head = snek.segments[0]
pygame.draw.rect(displaysurface, RED, [snake_head[0], snake_head[1], 10, 10])
# Game over message
font = pygame.font.SysFont('calibri', 60, bold=True)
text = font.render('GAME OVER', True, RED)
text_rect = text.get_rect(center=(200, 200))
displaysurface.blit(text, text_rect)
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_r:
# Reset all variables and restart the game on pressing r
GameState.game_over = False
GameState.score = 0
GameState.speed_multiplier = 1
snek.__init__()
chow.__init__()
main()
elif event.key == pygame.K_ESCAPE or event.key == pygame.K_q:
pygame.quit()
sys.exit()
elif event.type == pygame.QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
clock.tick(FPS)
if __name__ == '__main__':
main()
| gpl-3.0 | 3,223,128,494,537,172,000 | 24.335025 | 88 | 0.64436 | false | 2.680451 | false | false | false |
uclmr/inferbeddings | inferbeddings/nli/disan/disan.py | 1 | 10785 | # -*- coding: utf-8 -*-
import tensorflow as tf
from functools import reduce
from operator import mul
VERY_BIG_NUMBER = 1e30
VERY_SMALL_NUMBER = 1e-30
VERY_POSITIVE_NUMBER = VERY_BIG_NUMBER
VERY_NEGATIVE_NUMBER = -VERY_BIG_NUMBER
# --------------- DiSAN Interface ----------------
def disan(rep_tensor, rep_mask, scope=None,
keep_prob=1., is_train=None, wd=0., activation='elu',
tensor_dict=None, name=''):
with tf.variable_scope(scope or 'DiSAN'):
with tf.variable_scope('ct_attn'):
fw_res = directional_attention_with_dense(
rep_tensor, rep_mask, 'forward', 'dir_attn_fw',
keep_prob, is_train, wd, activation,
tensor_dict=tensor_dict, name=name+'_fw_attn')
bw_res = directional_attention_with_dense(
rep_tensor, rep_mask, 'backward', 'dir_attn_bw',
keep_prob, is_train, wd, activation,
tensor_dict=tensor_dict, name=name+'_bw_attn')
seq_rep = tf.concat([fw_res, bw_res], -1)
with tf.variable_scope('sent_enc_attn'):
sent_rep = multi_dimensional_attention(
seq_rep, rep_mask, 'multi_dimensional_attention',
keep_prob, is_train, wd, activation,
tensor_dict=tensor_dict, name=name+'_attn')
return sent_rep
# --------------- supporting networks ----------------
def directional_attention_with_dense(rep_tensor, rep_mask, direction=None, scope=None,
keep_prob=1., is_train=None, wd=0., activation='elu',
tensor_dict=None, name=None):
def scaled_tanh(x, scale=5.):
return scale * tf.nn.tanh(1./scale * x)
bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2]
ivec = rep_tensor.get_shape()[2]
with tf.variable_scope(scope or 'directional_attention_%s' % direction or 'diag'):
# mask generation
sl_indices = tf.range(sl, dtype=tf.int32)
sl_col, sl_row = tf.meshgrid(sl_indices, sl_indices)
if direction is None:
direct_mask = tf.cast(tf.diag(- tf.ones([sl], tf.int32)) + 1, tf.bool)
else:
if direction == 'forward':
direct_mask = tf.greater(sl_row, sl_col)
else:
direct_mask = tf.greater(sl_col, sl_row)
direct_mask_tile = tf.tile(tf.expand_dims(direct_mask, 0), [bs, 1, 1]) # bs,sl,sl
rep_mask_tile = tf.tile(tf.expand_dims(rep_mask, 1), [1, sl, 1]) # bs,sl,sl
attn_mask = tf.logical_and(direct_mask_tile, rep_mask_tile) # bs,sl,sl
# non-linear
rep_map = bn_dense_layer(rep_tensor, ivec, True, 0., 'bn_dense_map', activation,
False, wd, keep_prob, is_train)
rep_map_tile = tf.tile(tf.expand_dims(rep_map, 1), [1, sl, 1, 1]) # bs,sl,sl,vec
rep_map_dp = dropout(rep_map, keep_prob, is_train)
# attention
with tf.variable_scope('attention'): # bs,sl,sl,vec
f_bias = tf.get_variable('f_bias',[ivec], tf.float32, tf.constant_initializer(0.))
dependent = linear(rep_map_dp, ivec, False, scope='linear_dependent') # bs,sl,vec
dependent_etd = tf.expand_dims(dependent, 1) # bs,1,sl,vec
head = linear(rep_map_dp, ivec, False, scope='linear_head') # bs,sl,vec
head_etd = tf.expand_dims(head, 2) # bs,sl,1,vec
logits = scaled_tanh(dependent_etd + head_etd + f_bias, 5.0) # bs,sl,sl,vec
logits_masked = exp_mask_for_high_rank(logits, attn_mask)
attn_score = tf.nn.softmax(logits_masked, 2) # bs,sl,sl,vec
attn_score = mask_for_high_rank(attn_score, attn_mask)
attn_result = tf.reduce_sum(attn_score * rep_map_tile, 2) # bs,sl,vec
with tf.variable_scope('output'):
o_bias = tf.get_variable('o_bias',[ivec], tf.float32, tf.constant_initializer(0.))
# input gate
fusion_gate = tf.nn.sigmoid(
linear(rep_map, ivec, True, 0., 'linear_fusion_i', False, wd, keep_prob, is_train) +
linear(attn_result, ivec, True, 0., 'linear_fusion_a', False, wd, keep_prob, is_train) +
o_bias)
output = fusion_gate * rep_map + (1-fusion_gate) * attn_result
output = mask_for_high_rank(output, rep_mask)
# save attn
if tensor_dict is not None and name is not None:
tensor_dict[name + '_dependent'] = dependent
tensor_dict[name + '_head'] = head
tensor_dict[name] = attn_score
tensor_dict[name + '_gate'] = fusion_gate
return output
def multi_dimensional_attention(rep_tensor, rep_mask, scope=None,
keep_prob=1., is_train=None, wd=0., activation='elu',
tensor_dict=None, name=None):
bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2]
ivec = rep_tensor.get_shape()[2]
with tf.variable_scope(scope or 'multi_dimensional_attention'):
map1 = bn_dense_layer(rep_tensor, ivec, True, 0., 'bn_dense_map1', activation,
False, wd, keep_prob, is_train)
map2 = bn_dense_layer(map1, ivec, True, 0., 'bn_dense_map2', 'linear',
False, wd, keep_prob, is_train)
map2_masked = exp_mask_for_high_rank(map2, rep_mask)
soft = tf.nn.softmax(map2_masked, 1) # bs,sl,vec
attn_output = tf.reduce_sum(soft * rep_tensor, 1) # bs, vec
# save attn
if tensor_dict is not None and name is not None:
tensor_dict[name] = soft
return attn_output
def bn_dense_layer(input_tensor, hn, bias, bias_start=0.0, scope=None,
activation='relu', enable_bn=True,
wd=0., keep_prob=1.0, is_train=None):
if is_train is None:
is_train = False
# activation
if activation == 'linear':
activation_func = tf.identity
elif activation == 'relu':
activation_func = tf.nn.relu
elif activation == 'elu':
activation_func = tf.nn.elu
elif activation == 'selu':
activation_func = selu
else:
raise AttributeError('no activation function named as %s' % activation)
with tf.variable_scope(scope or 'bn_dense_layer'):
linear_map = linear(input_tensor, hn, bias, bias_start, 'linear_map',
False, wd, keep_prob, is_train)
if enable_bn:
linear_map = tf.contrib.layers.batch_norm(
linear_map, center=True, scale=True, is_training=is_train, scope='bn')
return activation_func(linear_map)
def dropout(x, keep_prob, is_train, noise_shape=None, seed=None, name=None):
with tf.name_scope(name or "dropout"):
assert is_train is not None
if keep_prob < 1.0:
d = tf.nn.dropout(x, keep_prob, noise_shape=noise_shape, seed=seed)
out = tf.cond(is_train, lambda: d, lambda: x)
return out
return x
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0,
is_train=None):
if args is None or (isinstance(args, (tuple, list)) and not args):
raise ValueError("`args` must be specified")
if not isinstance(args, (tuple, list)):
args = [args]
flat_args = [flatten(arg, 1) for arg in args] # for dense layer [(-1, d)]
if input_keep_prob < 1.0:
assert is_train is not None
flat_args = [tf.cond(is_train, lambda: tf.nn.dropout(arg, input_keep_prob), lambda: arg)# for dense layer [(-1, d)]
for arg in flat_args]
flat_out = _linear(flat_args, output_size, bias, bias_start=bias_start, scope=scope) # dense
out = reconstruct(flat_out, args[0], 1)
if squeeze:
out = tf.squeeze(out, [len(args[0].get_shape().as_list())-1])
if wd:
add_reg_without_bias()
return out
def _linear(xs,output_size,bias,bias_start=0., scope=None):
with tf.variable_scope(scope or 'linear_layer'):
x = tf.concat(xs,-1)
input_size = x.get_shape()[-1]
W = tf.get_variable('W', shape=[input_size,output_size],dtype=tf.float32,
)
if bias:
bias = tf.get_variable('bias', shape=[output_size],dtype=tf.float32,
initializer=tf.constant_initializer(bias_start))
out = tf.matmul(x, W) + bias
else:
out = tf.matmul(x, W)
return out
def flatten(tensor, keep):
fixed_shape = tensor.get_shape().as_list()
start = len(fixed_shape) - keep
left = reduce(mul, [fixed_shape[i] or tf.shape(tensor)[i] for i in range(start)])
out_shape = [left] + [fixed_shape[i] or tf.shape(tensor)[i] for i in range(start, len(fixed_shape))]
flat = tf.reshape(tensor, out_shape)
return flat
def reconstruct(tensor, ref, keep, dim_reduced_keep=None):
dim_reduced_keep = dim_reduced_keep or keep
ref_shape = ref.get_shape().as_list() # original shape
tensor_shape = tensor.get_shape().as_list() # current shape
ref_stop = len(ref_shape) - keep # flatten dims list
tensor_start = len(tensor_shape) - dim_reduced_keep # start
pre_shape = [ref_shape[i] or tf.shape(ref)[i] for i in range(ref_stop)] #
keep_shape = [tensor_shape[i] or tf.shape(tensor)[i] for i in range(tensor_start, len(tensor_shape))] #
# pre_shape = [tf.shape(ref)[i] for i in range(len(ref.get_shape().as_list()[:-keep]))]
# keep_shape = tensor.get_shape().as_list()[-keep:]
target_shape = pre_shape + keep_shape
out = tf.reshape(tensor, target_shape)
return out
def mask_for_high_rank(val, val_mask, name=None):
val_mask = tf.expand_dims(val_mask, -1)
return tf.multiply(val, tf.cast(val_mask, tf.float32), name=name or 'mask_for_high_rank')
def exp_mask_for_high_rank(val, val_mask, name=None):
val_mask = tf.expand_dims(val_mask, -1)
return tf.add(val, (1 - tf.cast(val_mask, tf.float32)) * VERY_NEGATIVE_NUMBER,
name=name or 'exp_mask_for_high_rank')
def selu(x):
with tf.name_scope('elu') as scope:
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
return scale*tf.where(x>=0.0, x, alpha*tf.nn.elu(x))
def add_reg_without_bias(scope=None):
scope = scope or tf.get_variable_scope().name
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
counter = 0
for var in variables:
if len(var.get_shape().as_list()) <= 1: continue
tf.add_to_collection('reg_vars', var)
counter += 1
return counter | mit | 6,246,170,103,102,162,000 | 41.972112 | 123 | 0.583496 | false | 3.231945 | false | false | false |
okuchaiev/f-lm | glstm.py | 1 | 8571 | """Module for constructing RNN Cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.contrib.compiler import jit
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
# pylint: disable=protected-access
_Linear = core_rnn_cell._Linear # pylint: disable=invalid-name
# pylint: enable=protected-access
class GLSTMCell(rnn_cell_impl.RNNCell):
"""Group LSTM cell (G-LSTM).
The implementation is based on:
https://arxiv.org/abs/1703.10722
O. Kuchaiev and B. Ginsburg
"Factorization Tricks for LSTM Networks", ICLR 2017 workshop.
"""
def __init__(self, num_units, initializer=None, num_proj=None,
number_of_groups=1, forget_bias=1.0, activation=math_ops.tanh,
reuse=None):
"""Initialize the parameters of G-LSTM cell.
Args:
num_units: int, The number of units in the G-LSTM cell
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
number_of_groups: (optional) int, number of groups to use.
If `number_of_groups` is 1, then it should be equivalent to LSTM cell
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training.
activation: Activation function of the inner states.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already
has the given variables, an error is raised.
Raises:
ValueError: If `num_units` or `num_proj` is not divisible by
`number_of_groups`.
"""
super(GLSTMCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._initializer = initializer
self._num_proj = num_proj
self._forget_bias = forget_bias
self._activation = activation
self._number_of_groups = number_of_groups
if self._num_units % self._number_of_groups != 0:
raise ValueError("num_units must be divisible by number_of_groups")
if self._num_proj:
if self._num_proj % self._number_of_groups != 0:
raise ValueError("num_proj must be divisible by number_of_groups")
self._group_shape = [int(self._num_proj / self._number_of_groups),
int(self._num_units / self._number_of_groups)]
else:
self._group_shape = [int(self._num_units / self._number_of_groups),
int(self._num_units / self._number_of_groups)]
if num_proj:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_proj)
self._output_size = num_proj
else:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_units)
self._output_size = num_units
self._linear1 = [None] * self._number_of_groups
self._linear2 = None
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def _get_input_for_group(self, inputs, group_id, group_size):
"""Slices inputs into groups to prepare for processing by cell's groups
Args:
inputs: cell input or it's previous state,
a Tensor, 2D, [batch x num_units]
group_id: group id, a Scalar, for which to prepare input
group_size: size of the group
Returns:
subset of inputs corresponding to group "group_id",
a Tensor, 2D, [batch x num_units/number_of_groups]
"""
return array_ops.slice(input_=inputs,
begin=[0, group_id * group_size],
size=[self._batch_size, group_size],
name=("GLSTM_group%d_input_generation" % group_id))
def call(self, inputs, state):
"""Run one step of G-LSTM.
Args:
inputs: input Tensor, 2D, [batch x num_units].
state: this must be a tuple of state Tensors, both `2-D`,
with column sizes `c_state` and `m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
G-LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- LSTMStateTuple representing the new state of G-LSTM cell
after reading `inputs` when the previous state was `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
(c_prev, m_prev) = state
self._batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]
input_size = inputs.shape[-1].value or array_ops.shape(inputs)[-1]
dtype = inputs.dtype
scope = vs.get_variable_scope()
with vs.variable_scope(scope, initializer=self._initializer):
i_parts = []
j_parts = []
f_parts = []
o_parts = []
for group_id in range(self._number_of_groups):
with vs.variable_scope("group%d" % group_id):
x_g_id = array_ops.concat(
[self._get_input_for_group(inputs, group_id,
int(input_size / self._number_of_groups)),
#self._group_shape[0]), # this is only correct if inputs dim = num_units!!!
self._get_input_for_group(m_prev, group_id,
int(self._output_size / self._number_of_groups))], axis=1)
#self._group_shape[0])], axis=1)
if self._linear1[group_id] is None:
self._linear1[group_id] = _Linear(x_g_id, 4 * self._group_shape[1], False)
R_k = self._linear1[group_id](x_g_id) # pylint: disable=invalid-name
i_k, j_k, f_k, o_k = array_ops.split(R_k, 4, 1)
i_parts.append(i_k)
j_parts.append(j_k)
f_parts.append(f_k)
o_parts.append(o_k)
bi = vs.get_variable(name="bias_i",
shape=[self._num_units],
dtype=dtype,
initializer=
init_ops.constant_initializer(0.0, dtype=dtype))
bj = vs.get_variable(name="bias_j",
shape=[self._num_units],
dtype=dtype,
initializer=
init_ops.constant_initializer(0.0, dtype=dtype))
bf = vs.get_variable(name="bias_f",
shape=[self._num_units],
dtype=dtype,
initializer=
init_ops.constant_initializer(0.0, dtype=dtype))
bo = vs.get_variable(name="bias_o",
shape=[self._num_units],
dtype=dtype,
initializer=
init_ops.constant_initializer(0.0, dtype=dtype))
i = nn_ops.bias_add(array_ops.concat(i_parts, axis=1), bi)
j = nn_ops.bias_add(array_ops.concat(j_parts, axis=1), bj)
f = nn_ops.bias_add(array_ops.concat(f_parts, axis=1), bf)
o = nn_ops.bias_add(array_ops.concat(o_parts, axis=1), bo)
c = (math_ops.sigmoid(f + self._forget_bias) * c_prev +
math_ops.sigmoid(i) * math_ops.tanh(j))
m = math_ops.sigmoid(o) * self._activation(c)
if self._num_proj is not None:
with vs.variable_scope("projection"):
if self._linear2 is None:
self._linear2 = _Linear(m, self._num_proj, False)
m = self._linear2(m)
new_state = rnn_cell_impl.LSTMStateTuple(c, m)
return m, new_state | mit | 5,373,462,395,989,063,000 | 41.435644 | 114 | 0.614397 | false | 3.757562 | false | false | false |
strazzere/py010parser | z.py | 3 | 2648 | import sys
from pycparser.c_ast import *
from pycparser.c_parser import CParser, Coord, ParseError
from pycparser.c_lexer import CLexer
def expand_decl(decl):
""" Converts the declaration into a nested list.
"""
typ = type(decl)
if typ == TypeDecl:
return ['TypeDecl', expand_decl(decl.type)]
elif typ == IdentifierType:
return ['IdentifierType', decl.names]
elif typ == ID:
return ['ID', decl.name]
elif typ in [Struct, Union]:
decls = [expand_decl(d) for d in decl.decls or []]
return [typ.__name__, decl.name, decls]
else:
nested = expand_decl(decl.type)
if typ == Decl:
if decl.quals:
return ['Decl', decl.quals, decl.name, nested]
else:
return ['Decl', decl.name, nested]
elif typ == Typename: # for function parameters
if decl.quals:
return ['Typename', decl.quals, nested]
else:
return ['Typename', nested]
elif typ == ArrayDecl:
dimval = decl.dim.value if decl.dim else ''
return ['ArrayDecl', dimval, nested]
elif typ == PtrDecl:
return ['PtrDecl', nested]
elif typ == Typedef:
return ['Typedef', decl.name, nested]
elif typ == FuncDecl:
if decl.args:
params = [expand_decl(param) for param in decl.args.params]
else:
params = []
return ['FuncDecl', params, nested]
#-----------------------------------------------------------------
class NodeVisitor(object):
def __init__(self):
self.current_parent = None
def visit(self, node):
""" Visit a node.
"""
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def visit_FuncCall(self, node):
print("Visiting FuncCall")
print(node.show())
print('---- parent ----')
print(self.current_parent.show())
def generic_visit(self, node):
""" Called if no explicit visitor function exists for a
node. Implements preorder visiting of the node.
"""
oldparent = self.current_parent
self.current_parent = node
for c in node.children():
self.visit(c)
self.current_parent = oldparent
if __name__ == "__main__":
source_code = r'''void foo() {
L"hi" L"there";
}
'''
parser = CParser()
ast = parser.parse(source_code, filename='zz')
ast.show(showcoord=True, attrnames=True, nodenames=True)
| bsd-3-clause | -2,917,543,122,236,092,000 | 30.152941 | 75 | 0.54003 | false | 4.04893 | false | false | false |
ethereum/pydevp2p | devp2p/tests/test_peermanager.py | 1 | 1405 | from devp2p import peermanager
from devp2p import crypto
from devp2p.app import BaseApp
from rlp.utils import encode_hex
import devp2p.muxsession
import rlp
import devp2p.p2p_protocol
import time
import gevent
import copy
import socket
def try_tcp_connect(addr):
s = socket.socket()
s.connect(addr)
s.close()
def test_app_restart():
host, port = '127.0.0.1', 3020
a_config = dict(p2p=dict(listen_host=host, listen_port=port),
node=dict(privkey_hex=encode_hex(crypto.sha3(b'a'))))
a_app = BaseApp(a_config)
peermanager.PeerManager.register_with_app(a_app)
# Restart app 10-times: there should be no exception
for i in range(10):
a_app.start()
assert a_app.services.peermanager.server.started
try_tcp_connect((host, port))
assert a_app.services.peermanager.num_peers() == 0
a_app.stop()
assert a_app.services.peermanager.is_stopped
# Start the app 10-times: there should be no exception like 'Bind error'
for i in range(10):
a_app.start()
assert a_app.services.peermanager.server.started
try_tcp_connect((host, port))
a_app.stop()
assert a_app.services.peermanager.is_stopped
if __name__ == '__main__':
# ethereum -loglevel 5 --bootnodes ''
import ethereum.slogging
ethereum.slogging.configure(config_string=':debug')
test_app_restart()
| mit | -3,365,345,710,625,542,000 | 26.54902 | 76 | 0.669039 | false | 3.252315 | false | false | false |
CacheBrowser/cachebrowser | cachebrowser/pipes/scrambler.py | 1 | 19207 | # cdn.jsdelivr.net
import logging
from time import time, sleep
from threading import Thread, RLock
from random import random, shuffle, choice
from six.moves.urllib.parse import urlparse
from mitmproxy.models import HTTPResponse
from netlib.http import Headers
from cachebrowser.pipes.base import FlowPipe
from cachebrowser.util import get_flow_size, pretty_bytes
logger = logging.getLogger(__name__)
DOWNSTREAM_STD = 100000
def should_i(prob):
return random() < prob
class ScramblerPipe(FlowPipe):
PROB_AD_BLOCK = 1.0
PROB_AD_DECOY = 1.0
PROB_DECOY = 0.2
OVERHEAD = 0.1
BLOCK_ADS = True
def __init__(self, *args, **kwargs):
super(ScramblerPipe, self).__init__(*args, **kwargs)
self.overhead = self.OVERHEAD
self.drop_ads = True
self.send_decoys = True
self.org_names = self.read_org_names()
self.adblocker = AdBlocker()
self.netstats = NetStatKeeper(self.org_names)
self.decoymaker = DecoyMaker(self.netstats, self.org_names)
self.api = ScramblerAPI(self.context, self)
self.block_count = 0
self.notblock_count = 0
self.upstream_overhead = 0
self.upstream_traffic = 0 # Non-Overhead traffic
self.downstream_overhead = 0
self.downstream_traffic = 0
self.decoysent = 0
self.decoyreceived = 0
self.user_requests = 0
self.blocked_requests = 0
def start(self):
# super(Scrambler, self).start()
self.adblocker.load_blacklist(self.context.settings.data_path('scrambler/ad-domains'),
self.context.settings.data_path('scrambler/blacklist'))
self.decoymaker.load_decoys(self.context.settings.data_path('scrambler/decoy.json'))
def reset(self):
self.block_count = 0
self.notblock_count = 0
self.netstats.reset()
self.upstream_overhead = 0
self.upstream_traffic = 0 # Non-Overhead traffic
self.downstream_overhead = 0
self.downstream_traffic = 0
self.decoysent = 0
self.decoyreceived = 0
self.decoymaker.inflight = 0
self.user_requests = 0
self.blocked_requests = 0
def get_stats(self):
return {
'blocked': 0,
'upstream_overhead': self.upstream_overhead,
'upstream_normal': self.upstream_traffic,
'downstream_overhead': self.downstream_overhead,
'downstream_normal': self.downstream_traffic,
'decoys': self.decoyreceived,
'decoys_sent': self.decoysent,
'max_overhead': self.overhead,
'user_requests': self.user_requests,
'blocked_requests': self.blocked_requests,
'adblock_enabled': self.BLOCK_ADS
}
def serverconnect(self, server_conn):
pass
def print_stats(self):
print(self.decoymaker.inflight)
print("Sent: {} Received: {} Overhead: {} Traffic: {} Overhead: {} Traffic: {} ".format(self.decoysent, self.decoyreceived,
pretty_bytes(self.downstream_overhead), pretty_bytes(self.downstream_traffic),
pretty_bytes(self.upstream_overhead), pretty_bytes(self.upstream_traffic)))
def request(self, flow):
is_decoy = hasattr(flow, 'is_decoy') and flow.is_decoy
if is_decoy:
self.netstats.update_real_upstream(flow)
self.upstream_overhead += get_flow_size(flow)[0]
self.decoysent += 1
else:
self.netstats.update_real_upstream(flow)
self.netstats.update_requested_upstream(flow)
self.upstream_traffic += get_flow_size(flow)[0]
self.user_requests += 1
if self.BLOCK_ADS and self.adblocker.should_block(flow):
self.blocked_requests += 1
self.dummy_response(flow)
self._send_decoy_request(skip_netname=_whois(flow, self.org_names))
else:
for i in range(6):
wanted = sum(self.netstats.requested_downstream_traffic.values())
actual = sum(self.netstats.real_downstream_traffic.values())
if actual + self.decoymaker.inflight < wanted + wanted * self.overhead:
self._send_decoy_request()
# self.print_stats()
# print("")
# logging.info('>> {} {} '.format(self.notblock_count, self.block_count))
def response(self, flow):
is_decoy = hasattr(flow, 'is_decoy') and flow.is_decoy
if is_decoy:
self.netstats.update_real_downstream(flow)
self.decoymaker.record_decoy_received(flow)
self.decoyreceived += 1
self.downstream_overhead += get_flow_size(flow)[1]
else:
self.netstats.update_real_downstream(flow)
self.netstats.update_requested_downstream(flow)
self.downstream_traffic += get_flow_size(flow)[1]
def read_org_names(self):
with open(self.context.settings.data_path('scrambler/decoy.json')) as f:
import json
netname_data = json.loads(f.read())
org_names = netname_data.keys()
org_names.append('OTHER')
return org_names
def _send_decoy_request(self, skip_netname=None):
decoyurl = self.decoymaker.get_decoy_url(skip_netname)
# logging.info("Sending DECOY to {}".format(decoyurl))
if decoyurl is not None:
new_flow = self.create_request_from_url('GET', decoyurl)
# Don't update stats on dummy request
new_flow.outgoing_request = True
new_flow.is_decoy = True
self.send_request(new_flow, run_hooks=True)
self.decoymaker.record_decoy_sent(new_flow, decoyurl)
def handle_ads(self, flow):
domain = urlparse(flow.request.url).netloc
if self.adblocker.should_block(flow) and self.drop_ads and should_i(self.PROB_AD_BLOCK):
self.dummy_response(flow)
if self.send_decoys and should_i(self.PROB_AD_DECOY):
decoy_url = self.decoymaker.get_decoy_url(flow)
if decoy_url is not None:
# logging.info("@@@@@@@@@@@@@@ Sending Decoy Request {}".format(decoy_url))
new_flow = self.create_request_from_url('GET', decoy_url)
# Don't update stats on dummy request
new_flow.outgoing_request = True
new_flow.is_dummy = True
self.send_request(new_flow, run_hooks=True)
return True
return False
def dummy_response(self, flow):
resp = HTTPResponse(
"HTTP/1.1", 444, "Blocked",
Headers(Content_Type="text/html"),
"You got blocked by CDNReaper")
flow.reply(resp)
def error(self, flow):
pass
class ScramblerAPI(object):
def __init__(self, context, scrambler):
self.scrambler = scrambler
context.ipc.register_rpc('/scrambler/get/settings', self.get_settings)
context.ipc.register_rpc('/scrambler/set/settings', self.set_settings)
context.ipc.register_rpc('/scrambler/enable', self.enable_scrambler)
context.ipc.register_rpc('/scrambler/disable', self.disable_scrambler)
def get_settings(self, context, request):
request.reply({
'result': 'success',
'settings': {
'enabled': self.scrambler.enabled,
'overhead': self.scrambler.overhead,
'drops': self.scrambler.drop_ads,
'decoys': self.scrambler.send_decoys
}
})
def set_settings(self, context, request):
if 'enabled' in request.params:
self.scrambler.enabled = bool(request.params['enabled'])
if 'overhead' in request.params:
self.scrambler.overhead = int(request.params['overhead'])
if 'drops' in request.params:
self.scrambler.drop_ads = bool(request.params['drops'])
if 'decoys' in request.params:
self.scrambler.send_decoys = bool(request.params['decoys'])
request.reply({
'result': 'success'
})
def enable_scrambler(self, context, request):
self.scrambler.enable()
request.reply({'result': 'success'})
def disable_scrambler(self, context, request):
self.scrambler.disable()
request.reply({'result': 'success'})
class NetStatKeeper(object):
UPSTREAM_STD = 200
DOWNSTREAM_STD = DOWNSTREAM_STD
S = 10
def __init__(self, org_names):
from collections import deque
self.org_names = org_names
self.requested_upstream = {}
self.requested_downstream = {}
self.requested_upstream_traffic = {}
self.requested_downstream_traffic = {}
self.real_upstream = {}
self.real_downstream = {}
self.real_upstream_traffic = {}
self.real_downstream_traffic = {}
self.lock = RLock()
# self.outgoing_lock = RLock()
for org in org_names:
self.requested_upstream[org] = deque()
self.requested_downstream[org] = deque()
self.real_downstream[org] = deque()
self.real_upstream[org] = deque()
self.real_downstream_traffic[org] = 0
self.real_upstream_traffic[org] = 0
self.requested_downstream_traffic[org] = 0
self.requested_upstream_traffic[org] = 0
def refresher():
def refresh(ds):
for k in ds:
while len(ds[k]):
if ds[k][0][0] < threshold:
ds[k].popleft()
else:
break
while True:
sleep(1)
now = time()
threshold = now - self.S
with self.lock:
refresh(self.requested_downstream)
refresh(self.requested_upstream)
refresh(self.real_downstream)
refresh(self.real_upstream)
for netname in org_names:
self.requested_upstream_traffic[netname] = 0
for item in self.requested_upstream[netname]:
self.requested_upstream_traffic[netname] += item[1]
self.requested_downstream_traffic[netname] = 0
for item in self.requested_downstream[netname]:
self.requested_downstream_traffic[netname] += item[1]
self.real_upstream_traffic[netname] = 0
for item in self.real_upstream[netname]:
self.real_upstream_traffic[netname] += item[1]
self.real_downstream_traffic[netname] = 0
for item in self.real_downstream[netname]:
self.real_downstream_traffic[netname] += item[1]
refresh_thread = Thread(target=refresher)
refresh_thread.daemon = True
refresh_thread.start()
def update_requested_downstream(self, flow):
ip = _get_flow_ip(flow)
if ip is None:
return
_, resp = get_flow_size(flow)
netname = _whois(ip, self.org_names)
with self.lock:
self.requested_downstream_traffic[netname] += resp
self.requested_downstream[netname].append((time(), resp))
def update_requested_upstream(self, flow):
ip = _get_flow_ip(flow)
if ip is None:
return
req, _ = get_flow_size(flow)
netname = _whois(ip, self.org_names)
with self.lock:
self.requested_upstream_traffic[netname] += req
self.requested_upstream[netname].append((time(), req))
def update_real_downstream(self, flow):
ip = _get_flow_ip(flow)
if ip is None:
return
_, resp = get_flow_size(flow)
netname = _whois(ip, self.org_names)
with self.lock:
self.real_downstream_traffic[netname] += resp
self.real_downstream[netname].append((time(), resp))
def update_real_upstream(self, flow):
ip = _get_flow_ip(flow)
if ip is None:
return
req, _ = get_flow_size(flow)
netname = _whois(ip, self.org_names)
with self.lock:
self.real_upstream_traffic[netname] += req
self.real_upstream[netname].append((time(), req))
def reset(self):
with self.lock:
for key in self.requested_downstream:
self.requested_downstream[key].clear()
self.requested_upstream[key].clear()
self.real_downstream[key].clear()
self.real_upstream[key].clear()
class DecoyMaker(object):
def __init__(self, netstats, org_names):
self.netstats = netstats
self.decoy_urls = {}
self.decoy_sizes = {}
self.netnames = []
self.inflight = 0
for org in org_names:
self.netnames.append(org)
def get_decoy_url(self, skip_netname=None):
flow_netname = skip_netname
shuffle(self.netnames)
def key(netname):
if netname == flow_netname:
return 100000
if netname == 'OTHER':
return 100000
if netname not in self.decoy_urls or not len(self.decoy_urls[netname]):
return 50000
return self.netstats.requested_upstream_traffic[netname]
self.netnames.sort(key=key)
netname = self.netnames[0]
if netname not in self.decoy_urls or not len(self.decoy_urls[netname]):
return None
# return self.decoy_urls[netname][0]
return choice(self.decoy_urls[netname])
def record_decoy_sent(self, flow, url):
flow.estimated_size = self.decoy_sizes[url]
self.inflight += flow.estimated_size
def record_decoy_received(self, flow):
self.inflight -= flow.estimated_size
def load_decoys(self, decoys_path):
import yaml
import json
# json loads strings as unicode, causes problems with saving flows
with open(decoys_path) as f:
# decoy_urls = yaml.safe_load(f.read())
decoy_urls = json.loads(f.read())
for netname in decoy_urls:
self.decoy_urls[netname] = [str(s) for s in decoy_urls[netname].keys()]
for url in decoy_urls[netname]:
self.decoy_sizes[str(url)] = decoy_urls[netname][url]
# self.decoy_sizes.update(decoy_urls[netname])
class AdBlocker(object):
def __init__(self):
self.single_dom = set()
self.multi_dom = set()
self.adset = set()
self.blacklist = []
def should_block(self, flow):
from fnmatch import fnmatch
domain = urlparse(flow.request.url).netloc
parts = domain.split('.')
dom = parts.pop()
while parts:
dom = '{}.{}'.format(parts.pop(), dom)
if dom in self.adset:
return True
url = flow.request.url.replace('https://', '').replace('http://', '')
for pattern in self.blacklist:
if fnmatch(url, pattern):
return True
return False
def load_blacklist(self, ad_domains_path, blacklist_path):
with open(ad_domains_path) as f:
for ad in f:
ad = ad.strip()
if not ad: continue
if ad.count('.') == 1:
self.single_dom.add(ad)
else:
self.multi_dom.add(ad)
self.adset.add(ad)
with open(blacklist_path) as f:
for dom in f:
dom = dom.strip()
if dom:
self.blacklist.append(dom)
def _get_flow_ip(flow):
if flow.server_conn and flow.server_conn.peer_address:
return flow.server_conn.peer_address.host
domain = urlparse(flow.request.url).netloc
ips, domains = _dig(domain)
if len(ips):
return ips[0]
return None
_whois_cache = {}
def _whois(ip, org_names):
from ipwhois import IPWhois
if type(ip) is not str:
ip = _get_flow_ip(ip)
if ip not in _whois_cache:
whois = IPWhois(ip)
try:
name = whois.lookup_rdap()['network']['name']
if not name:
name = whois.lookup()['nets'][0]['name']
except:
print("WHOIS ERROR")
name = 'OTHER'
_whois_cache[ip] = _clean_netname(org_names, name, ip)
return _whois_cache[ip]
def clean_netname(netname):
"""
Convert a whois netname into an organization name
"""
# from cdn import cdn_list
ORGS = [
('GOOGLE', ['google']),
('AKAMAI', ['akamai', 'umass']),
('AMAZON', ['at-', 'amazo']),
# ('CLOUDFRONT', []),
('FASTLY', ['fastly']),
('CLOUDFLARE', ['cloudflare']),
('EDGECAST', ['edgecast']),
('HIGHWINDS', ['highwind']),
('INCAPSULA', ['incapsula']),
('MAXCDN', ['netdna']),
('CDNET', ['cdnet']),
('TWITTER', ['twitter']),
('INAP', ['inap-']),
('LINODE', ['linode']),
('DIGITALOCEAN', ['digitalocean']),
('YAHOO', ['yahoo']),
('FACEBOOK', ['facebook', 'ord1', 'tfbnet']),
('OTHER', [])
]
if ' ' in netname:
netname = netname.split()[0]
lower = netname.lower()
for org in ORGS:
if any([x in lower for x in org[1]]):
return org[0]
else:
org = netname.split()[0]
# if '-' in org:
# org = org[:org.rindex('-')]
parts = org.split('-')
if len(parts) < 3:
org = parts[0]
elif parts[1].isdigit() :
org = parts[0]
else:
org = parts[0] + '-' + parts[1] #+ '-' + parts[2]
# if org.startswith('AMAZO') or org.startswith('AT-'):
# org = 'AMAZON'
if org.startswith('WEBAIRINTERNET12'):
org = 'WEBAIRINTERNET12'
return org
def _clean_netname(org_names, name, ip):
org = clean_netname(name)
if name in org_names:
return name
return 'OTHER'
def _parse_dig(raw_dig):
import re
if len(raw_dig.strip()) == 0:
return [], []
lines = raw_dig.strip().split('\n')
ip = []
domains = []
for line in lines:
line = line.strip()
if re.match('^\d+[.]\d+[.]\d+[.]\d+$', line):
ip.append(line)
else:
domains.append(line)
return ip, domains
def _dig(site, raw=False):
from subprocess import Popen, PIPE
process = Popen(["dig", "+short", site], stdout=PIPE)
(output, err) = process.communicate()
exit_code = process.wait()
if raw:
return output
return _parse_dig(output)
| mit | -7,097,376,522,005,328,000 | 30.799669 | 151 | 0.552038 | false | 3.686564 | false | false | false |
pschmitt/zhue | zhue/model/config.py | 1 | 1540 | from __future__ import absolute_import
from __future__ import unicode_literals
from .basemodel import HueBaseObject
from .user import User
class BridgeConfig(HueBaseObject):
def __init__(self, bridge, json):
super(BridgeConfig, self).__init__(bridge, 'config', json)
# Versions
@property
def api_version(self):
return self._json['apiversion']
@property
def version(self):
return self._json['swversion']
@property
def bridge_id(self):
return self._json['bridgeid']
# Network stuff
@property
def dhcp(self):
return self._json['dhcp']
@property
def gateway(self):
return self._json['gateway']
@property
def mac(self):
return self._json['mac']
@property
def netmask(self):
return self._json['netmask']
@property
def zigbeechannel(self):
return self._json['zigbeechannel']
@property
def factorynew(self):
return self._json['factorynew']
@property
def timezone(self):
return self._json['timezone']
@property
def localtime(self):
return self._json['localtime']
@property
def utc(self):
return self._json['UTC']
@property
def users(self):
u = []
for k, v in self._json['whitelist'].items():
u.append(User(username=k, json=v))
return u
def update_check(self):
res = self._request(data={"swupdate": {"checkforupdate":True}})
self.update()
return res
| gpl-3.0 | 1,750,811,497,076,904,700 | 20.690141 | 71 | 0.593506 | false | 4.031414 | false | false | false |
dewitt/webfingerclient-dclinton | xrd.py | 1 | 8816 | #!/usr/bin/python2.5
#
# Parses XRD documents.
#
# Copyright 2009 DeWitt Clinton
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imports
import xrd_pb2
# As specified in:
# http://www.oasis-open.org/committees/download.php/33772/xrd-1.0-wd04.html
XRD_NAMESPACE = 'http://docs.oasis-open.org/ns/xri/xrd-1.0'
# As specifed in http://www.w3.org/TR/xml-names/
XML_NAMESPACE = 'http://www.w3.org/XML/1998/namespace'
# As specified in http://www.w3.org/TR/xmlschema-1/
XSI_NAMESPACE = 'http://www.w3.org/2001/XMLSchema-instance'
# The etree syntax for qualified element names
XRD_QNAME = '{%s}%s' % (XRD_NAMESPACE, 'XRD')
EXPIRES_QNAME = '{%s}%s' % (XRD_NAMESPACE, 'Expires')
SUBJECT_QNAME = '{%s}%s' % (XRD_NAMESPACE, 'Subject')
PROPERTY_QNAME = '{%s}%s' % (XRD_NAMESPACE, 'Property')
ALIAS_QNAME = '{%s}%s' % (XRD_NAMESPACE, 'Alias')
LINK_QNAME = '{%s}%s' % (XRD_NAMESPACE, 'Link')
TITLE_QNAME = '{%s}%s' % (XRD_NAMESPACE, 'Title')
# The etree syntax for qualified attribute names
ID_ATTRIBUTE = '{%s}%s' % (XML_NAMESPACE, 'id')
LANG_ATTRIBUTE = '{%s}%s' % (XML_NAMESPACE, 'lang')
NIL_ATTRIBUTE = '{%s}%s' % (XSI_NAMESPACE, 'nil')
class ParseError(Exception):
"""Raised in the event an XRD document can not be parsed."""
pass
class Parser(object):
"""Converts XML documents into xrd_pb2.Xrd instances."""
def __init__(self, etree=None):
"""Constructs a new XRD parser.
Args:
etree: The etree module to use [optional]
"""
if etree:
self._etree = etree
else:
import xml.etree.cElementTree
self._etree = xml.etree.cElementTree
def parse(self, string):
"""Converts XML strings into an xrd_pb2.Xrd instances
Args:
string: A string containing an XML XRD document.
Returns:
A xrd_pb2.Xrd instance.
Raises:
ParseError if the element can not be parsed
"""
if not string:
raise ParseError('Empty input string.')
try:
document = self._etree.fromstring(string)
except SyntaxError, e:
raise ParseError('Could not parse %s\nError: %s' % (string, e))
if document.tag != XRD_QNAME:
raise ParseError('Root is not an <XRD/> element: %s' % document)
description = xrd_pb2.Xrd()
self._parse_id(document, description)
self._parse_expires(document, description)
self._parse_subject(document, description)
self._parse_properties(document, description)
self._parse_aliases(document, description)
self._parse_links(document, description)
return description
def _parse_id(self, xrd_element, description):
"""Finds a xml:id attribute and adds it to the Xrd proto.
Args:
xrd_element: An XRD Element
description: The xrd_pb2.Xrd instance to be added to
"""
id_attribute = xrd_element.get(ID_ATTRIBUTE)
if id_attribute is not None:
description.id = id_attribute
def _parse_expires(self, xrd_element, description):
"""Finds an Expires element and adds it to the Xrd proto.
Args:
xrd_element: An XRD Element
description: The xrd_pb2.Xrd instance to be added to
"""
expires_element = xrd_element.find(EXPIRES_QNAME)
if expires_element is not None:
description.expires = expires_element.text
def _parse_subject(self, xrd_element, description):
"""Finds an Subject element and adds it to the Xrd proto.
Args:
xrd_element: An XRD Element
description: The xrd_pb2.Xrd instance to be added to
"""
subject_element = xrd_element.find(SUBJECT_QNAME)
if subject_element is not None:
description.subject = subject_element.text
def _parse_properties(self, xrd_element, description):
"""Finds Property elements and adds them to the Xrd proto.
Args:
xrd_element: An XRD Element
description: The xrd_pb2.Xrd instance to be added to
"""
for property_element in xrd_element.findall(PROPERTY_QNAME):
property_pb = description.properties.add()
property_pb.nil = (property_element.get(NIL_ATTRIBUTE) == 'true')
property_type = property_element.get('type')
if property_type != None:
property_pb.type = property_type
if property_element.text is not None:
property_pb.value = property_element.text
def _parse_aliases(self, xrd_element, description):
"""Finds Alias elements and adds them to the Xrd proto.
Args:
xrd_element: An XRD Element
description: The xrd_pb2.Xrd instance added to
"""
for alias_element in xrd_element.findall(ALIAS_QNAME):
description.aliases.append(alias_element.text)
def _parse_links(self, xrd_element, description):
"""Finds Link elements and adds them to the Xrd proto.
Args:
xrd_element: An XRD Element
description: The xrd_pb2.Xrd instance to be added to
"""
for link_element in xrd_element.findall(LINK_QNAME):
link = description.links.add()
rel = link_element.get('rel')
if rel is not None:
link.rel = rel
type_attribute = link_element.get('type')
if type_attribute is not None:
link.type = type_attribute
href = link_element.get('href')
if href is not None:
link.href = href
template = link_element.get('template')
if template is not None:
link.template = template
self._parse_properties(link_element, link)
self._parse_titles(link_element, link)
def _parse_titles(self, xrd_element, description):
"""Finds Title elements and adds them to the proto.
Args:
xrd_element: An XRD Element
description: The xrd_pb2.Xrd instance to be added to
"""
for title_element in xrd_element.findall(TITLE_QNAME):
title = description.titles.add()
lang = title_element.get(LANG_ATTRIBUTE)
if lang is not None:
title.lang = lang
if title_element.text is not None:
title.value = title_element.text
class JsonMarshaller(object):
def __init__(self):
try:
import simplejson as json
except ImportError:
import json
self._json = json
def to_json(self, description_or_descriptions, pretty=False):
if isinstance(description_or_descriptions, list):
output = list()
for description in description_or_descriptions:
output.append(self._to_object(description))
else:
output = self._to_object(description_or_descriptions)
if pretty:
return self._json.dumps(output, indent=2)
else:
return self._json.dumps(output)
def _to_object(self, description):
output = dict()
if description.id:
output['id'] = description.id
if description.expires:
output['expires'] = description.expires
if description.subject:
# jsmarr: note we're intentionally dropping any attributes on subject
output['subject'] = description.subject
if description.aliases:
# jsmarr: note we're intentionally dropping any attributes on aliases
output['aliases'] = [str(alias) for alias in description.aliases]
if description.properties:
output['properties'] = list()
for p in description.properties:
prop_val = dict()
if p.type:
prop_val['type'] = p.type
if p.value:
prop_val['value'] = p.value
output['properties'].append(prop_val)
if description.links:
output['links'] = list()
for link in description.links:
link_dict = dict()
if link.rel:
link_dict['rel'] = link.rel
if link.type:
link_dict['type'] = link.type
if link.href:
link_dict['href'] = link.href
if link.template:
link_dict['template'] = link.template
if link.titles:
# jsmarr: note we're assuming at most one title-per-language
title_dict = dict()
for title in link.titles:
if not title.value:
continue
title_lang = title.lang or ''
if title_lang not in title_dict:
title_dict[title_lang] = title.value
if title_dict:
link_dict['titles'] = title_dict
output['links'].append(link_dict)
# jsmarr: note we're not representing signature in json
return output
| apache-2.0 | -8,425,790,412,987,392,000 | 32.907692 | 77 | 0.645644 | false | 3.668747 | false | false | false |
okfn/jsontableschema-openrefine-py | jsontableschema/plugins/openrefine/openrefine_client.py | 1 | 2646 | import requests
import six
import six.moves.urllib.parse as urlparse
class OpenRefineClient(object):
_COMMANDS = {
'get_version': 'command/core/get-version',
'get_all_project_metadata': 'command/core/get-all-project-metadata',
'get_project_metadata': 'command/core/get-project-metadata',
'create_project_from_upload': 'command/core/create-project-from-upload',
'delete_project': 'command/core/delete-project',
'export_rows': 'command/core/export-rows',
}
def __init__(self, server_url):
if not isinstance(server_url, six.string_types):
raise TypeError('"server_url" must be a string')
self.server_url = server_url
@property
def version(self):
url = self._generate_url(self._COMMANDS['get_version'])
res = requests.get(url)
return res.json()['version']
def create_project(self, name, filepath):
url = self._generate_url(self._COMMANDS['create_project_from_upload'])
with open(filepath, 'rb') as project_file:
params = {
'project-name': name,
}
files = {
'file': project_file,
}
res = requests.post(url, allow_redirects=False,
data=params, files=files)
if res.is_redirect and res.headers.get('location'):
redirected_to = urlparse.urlparse(res.headers.get('location'))
query_params = urlparse.parse_qs(redirected_to.query)
return query_params.get('project')[0]
def get_projects(self):
url = self._generate_url(self._COMMANDS['get_all_project_metadata'])
res = requests.get(url)
return res.json().get('projects', {})
def get_project(self, project_id):
url = self._generate_url(self._COMMANDS['get_project_metadata'])
res = requests.get(url, params={'project': project_id})
if res.status_code == 200:
return res.json()
def delete_project(self, project_id):
url = self._generate_url(self._COMMANDS['delete_project'])
res = requests.post(url, params={'project': project_id})
if res.status_code == 200:
return res.json().get('code') == 'ok'
def export_project(self, project_id, file_format='csv'):
url = self._generate_url(self._COMMANDS['export_rows'])
res = requests.post(url, params={
'project': project_id,
'format': file_format,
})
return res.text
def _generate_url(self, command):
return urlparse.urljoin(self.server_url, command)
| mit | -8,839,431,766,508,617,000 | 35.75 | 80 | 0.589569 | false | 3.92 | false | false | false |
bjamesv/pymoldmaker | image.py | 1 | 2378 | #!/bin/python3
"""
this file is a part of pymoldmaker
Copyright (C) 2015 Brandon J. Van Vaerenbergh
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from PIL import Image
from PIL import ImageDraw
class Canvas:
""" an object representing a drawable vector image that can be written out
to disk as an image file.
"""
def __init__(self):
""" initializes the internal self.image object and pairs it with a
self.draw object for composing new visual content.
"""
# create a Python Image Library image object
mode = 'RGB'
self.dpi = 72 #25.4*15
size = (400, 300)
color_background = 'white'
self.image = Image.new(mode, size, color_background)
#prepare Image for drawing
color_lines = 'black'
self.draw = ImageDraw.Draw(self.image)
self.draw.setink( color_lines)
def draw_line(self, poly_line_mm):
"""
draws a polygonal line onto the image.
Arguments: poly_line_mm a list of x,y tuplets representing the vertices
of a polygonal shape in milimeters.
"""
# now draw something onto it
self.draw.line( poly_line_mm)
def save(self, destination):
""" saves the internal image representation to the specified path or
file object. """
# .. and save it out to disk
self.image.save( destination)
def mm_to_px(self, mm):
mm_per_inch = 25.4
pixels_per_mm = self.dpi/mm_per_inch
pixels_to_draw = mm*pixels_per_mm
if( (pixels_to_draw % 1) != 0):
pass #TODO: fix above to support mapping 1/3 mm to PS strokes/dots
#raise ValueError("specified dpi has resulted in lost precision")
return int(pixels_to_draw)
| gpl-3.0 | -3,171,076,100,930,244,600 | 36.15625 | 79 | 0.650967 | false | 4.030508 | false | false | false |
alexanderfefelov/nav | python/nav/netmap/metadata.py | 1 | 17323 | #
# Copyright (C) 2012 UNINETT AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details. You should have received a copy of the GNU General Public
# License along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Handles attaching and converting metadata in a netmap networkx toplogy
graph"""
from collections import defaultdict
import logging
from django.core.urlresolvers import reverse
import operator
from nav.netmap.config import NETMAP_CONFIG
from nav.errors import GeneralException
from nav.models.manage import GwPortPrefix, Interface
from nav.netmap import stubs
from nav.web.netmap.common import get_status_image_link
_LOGGER = logging.getLogger(__name__)
class NetmapException(GeneralException):
"""Generic Netmap Exception"""
pass
class GraphException(NetmapException):
"""Graph Exception
This exception is normally thrown if it finds something odd in the graph
from nav.topology or the metadata contains known errors.
"""
pass
# Ignore too few methods in class
# pylint: disable=R0903
class Node(object):
"""Node object represent a node in the netmap_graph
Makes it easier to validate data and convert node to valid json.
"""
def __init__(self, node, nx_node_metadata=None):
self.node = node
if nx_node_metadata and 'metadata' in nx_node_metadata:
self.metadata = nx_node_metadata['metadata']
else:
self.metadata = None
def __repr__(self):
return "netmap.Node(metadata={0!r})".format(self.metadata)
def to_json(self):
"""json presentation of Node"""
json = {}
if self.metadata:
if 'position' in self.metadata:
json.update({
'position': {
'x': self.metadata['position'].x,
'y': self.metadata['position'].y
}})
if 'vlans' in self.metadata: # Layer2 metadata
json.update({
'vlans': [nav_vlan_id for nav_vlan_id, _ in
self.metadata['vlans']]
})
if NETMAP_CONFIG.getboolean('API_DEBUG'):
json.update({
'd_vlans': [vlan_to_json(swpv.vlan) for _, swpv in
self.metadata['vlans']]
})
if isinstance(self.node, stubs.Netbox):
json.update({
'id': str(self.node.id),
'sysname': self.node.sysname,
'category': str(self.node.category_id),
'is_elink_node': True
})
else:
json.update({
'id': str(self.node.id),
'sysname': str(self.node.sysname),
'category': str(self.node.category_id),
'ip': self.node.ip,
'ipdevinfo_link': reverse('ipdevinfo-details-by-name',
args=[self.node.sysname]),
'up': str(self.node.up),
'up_image': get_status_image_link(self.node.up),
'roomid': self.node.room.id,
'locationid': unicode(self.node.room.location.id),
'location': unicode(self.node.room.location.description),
'room': unicode(self.node.room),
'is_elink_node': False,
})
return {unicode(self.node.id) : json}
# Ignore too few methods in class
# pylint: disable=R0903
class Group(object):
"""Grouping object for representing a Netbox and Interface in a Edge"""
def __init__(self, netbox=None, interface=None):
self.netbox = netbox
self.interface = interface
self.gw_ip = None
self.virtual = None
self.vlans = None
def __repr__(self):
return ("netmap.Group(netbox={0!r}, interface={1!r}, gw_ip={2!r}"
", virtual={3!r}, vlans={4!r})").format(
self.netbox, self.interface, self.gw_ip, self.virtual, self.vlans)
def __hash__(self):
return hash(self.netbox) + hash(self.interface)
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
else:
return (self.netbox == other.netbox and
self.interface == other.interface)
def to_json(self):
"""json presentation of Group"""
json = {
'netbox': unicode(self.netbox.id),
}
if self.interface is not None:
ipdevinfo_link = None
if self.interface.ifname and self.interface.ifname != '?':
ipdevinfo_link = reverse(
'ipdevinfo-interface-details-by-name',
kwargs={'netbox_sysname': unicode(
self.netbox.sysname),
'port_name': unicode(
self.interface.ifname)})
json.update({'interface': {
'ifname': unicode(self.interface.ifname),
'ipdevinfo_link': ipdevinfo_link
}})
if self.gw_ip is not None:
json.update({'gw_ip': self.gw_ip})
if self.virtual is not None:
json.update({'virtual': self.virtual})
if self.vlans is not None:
json.update({'vlans': [swpv.vlan.id for swpv in self.vlans]})
if NETMAP_CONFIG.getboolean('API_DEBUG'):
json.update({'d_netbox_sysname': unicode(self.netbox.sysname)})
json.update(
{'d_vlans': [vlan_to_json(swpv.vlan) for swpv in self.vlans]})
return json
# Ignore too few methods in class
# pylint: disable=R0903
class Edge(object):
"""Represent either a edge pair in Layer2 or Layer3"""
def _valid_layer2(self, edge):
return isinstance(edge, Interface) or isinstance(edge, stubs.Interface)
def _valid_layer3(self, edge):
return isinstance(edge, GwPortPrefix) or isinstance(edge,
stubs.GwPortPrefix)
def _get_layer(self, source, target):
if (self._valid_layer2(source) or source is None
and self._valid_layer2(target) or target is None):
return 2
elif (self._valid_layer3(source) or source is None
and self._valid_layer3(target) or target is None):
return 3
else:
raise NetmapException("Could not determine layer for this edge."
" This should _not_ happend")
def _same_layer(self, source, target):
return (self._valid_layer2(source) and self._valid_layer2(target)
or self._valid_layer3(source) and self._valid_layer3(target)
)
def __init__(self, nx_edge, source, target, traffic=None):
"""
:param nx_edge: NetworkX edge representing (source,target) in a tuple
.(they be nav.models.Netbox or nav.netmap.stubs.Netbox)
:param source: source, where it is either of type Interface or type
GwPortPrefix.
:param target: target, where it is either of type Interface or type
GwPortPrefix
:param vlans: List of SwPortVlan on this particular edge pair
:return:
"""
if source is not None and target is not None:
if not self._same_layer(source, target):
raise GraphException(
"Source and target has to be of same type, typically "
"Interfaces in layer2 graph or"
"GwPortPrefixes in layer3 graph")
elif source is None and target is None:
raise GraphException("Source & target can't both be None! Bailing!")
self.errors = []
self.source = self.target = self.vlan = self.prefix = None
nx_source, nx_target = nx_edge
if self._valid_layer2(source) :
self.source = Group(source.netbox, source)
elif self._valid_layer3(source):
self.source = Group(source.interface.netbox, source.interface)
self.source.gw_ip = source.gw_ip
self.source.virtual = source.virtual
if self._valid_layer2(target):
self.target = Group(target.netbox, target)
elif self._valid_layer3(target):
self.target = Group(target.interface.netbox, target.interface)
self.target.gw_ip = target.gw_ip
self.target.virtual = target.virtual
# Basic metadata validation, lets copy over Netbox data which is valid
# as metadata if metadata building didn't manage to fetch it's data.
# (this is due to Metadata in L2 is built on Interface<->Interface,
# both sides is not necessary known in the topology graph when building
# it)
# This could also be the case for L3, but since the topology method
# stubs.Netbox and stubs.Interface, we don't really have the same issue
# in an L3 graph.
if self.source is None: self.source = Group(nx_source)
if self.target is None: self.target = Group(nx_target)
# Swap directional metadata to follow nx graph edge.
if (self.source.netbox.id != nx_source.id) and (
self.source.netbox.id == nx_target.id):
tmp = self.source
self.source = self.target
self.target = tmp
self.layer = self._get_layer(source, target)
if self.layer == 3:
assert source.prefix.vlan.id == target.prefix.vlan.id, (
"Source and target GwPortPrefix must reside in same VLan for "
"Prefix! Bailing")
self.prefix = source.prefix
self.vlan = source.prefix.vlan
self.traffic = traffic
if (self.source and self.source.interface is not None) and (
self.target and self.target.interface is not None):
if self.source.interface.speed == self.target.interface.speed:
self.link_speed = self.source.interface.speed
else:
self.errors.append("Mismatch between interface speed")
if self.source.interface.speed < self.target.interface.speed:
self.link_speed = self.source.interface.speed
else:
self.link_speed = self.target.interface.speed
elif self.source and self.source.interface is not None:
self.link_speed = self.source.interface.speed
elif self.target and self.target.interface is not None:
self.link_speed = self.target.interface.speed
self.vlans = []
def __hash__(self):
return hash(self.source) + hash(self.target)
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
else:
return self.source == other.source and self.target == other.target
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return ("netmap.Edge(layer={0!r}, source={1!r}, target={2!r},"
"link_speed={3!r}, vlans={4!r}, vlan={5!r},"
"prefix={6!r})").format(self.layer, self.source, self.target,
self.link_speed, self.vlans, self.vlan,
self.prefix)
def to_json(self):
"""json presentation of Edge"""
json = {
'source': self.source.to_json() or 'null',
'target': self.target.to_json() or 'null',
}
if self.layer == 3:
json.update({'prefix': {
'net_address': unicode(self.prefix.net_address),
'report_link': reverse('report-prefix-prefix',
kwargs={'prefix_id': self.prefix.id})
}})
json.update({'vlan': self.prefix.vlan.id})
elif self.layer == 2:
json.update({'vlans': [swpv.vlan.id for swpv in self.vlans]})
json.update({'link_speed': self.link_speed or 'N/A'})
json.update(
{'traffic': self.traffic and self.traffic.to_json() or None})
return json
def vlan_to_json(vlan):
return {'vlan': vlan.vlan,
'nav_vlan': vlan.id,
'net_ident': vlan.net_ident,
'description': vlan.description
}
def get_vlan_lookup_json(vlan_by_interface):
vlan_lookup = {}
for list_of_swpv in vlan_by_interface.itervalues():
for swpv in list_of_swpv:
vlan_lookup[swpv.vlan.id] = vlan_to_json(swpv.vlan)
return vlan_lookup
def node_to_json_layer2(node, nx_metadata=None):
"""Convert a node to json, for use in a netmap layer2 graph
:param node A Netbox model
:param nx_metadata Metadata from networkx graph.
:return json presentation of a node.
"""
return Node(node, nx_metadata).to_json()
def node_to_json_layer3(node, nx_metadata=None):
"""Convert a node to json, for use in a netmap layer3 graph
:param node A Netbox model
:param nx_metadata Metadata from networkx graph.
:return json presentation of a node.
"""
return Node(node, nx_metadata).to_json()
def edge_to_json_layer2(nx_edge, metadata):
"""Convert a edge between A and B in a netmap layer2 graph to JSON
:param edge Metadata from netmap networkx graph
:return edge representation in JSON
"""
source, target = nx_edge
edges = metadata['metadata']
metadata_for_edges = []
all_vlans = set()
for edge in edges:
all_vlans = all_vlans | edge.vlans
metadata_for_edges.append(edge.to_json())
json = {
'source': unicode(source.id),
'target': unicode(target.id),
'vlans' : [swpv.vlan.id for swpv in all_vlans],
'edges': metadata_for_edges
}
if NETMAP_CONFIG.getboolean('API_DEBUG'):
json.update({
'd_source_sysname': unicode(source.sysname),
'd_target_sysname': unicode(target.sysname),
'd_vlans': [vlan_to_json(swpv.vlan) for swpv in all_vlans]
})
return json
def edge_to_json_layer3(nx_edge, nx_metadata):
"""Convert a edge between A and B in a netmap layer 3 graph to JSON
:param nx_metadata: Metadata from netmap networkx graph
:type nx_metadata: dict
:return edge representation in JSON
"""
source, target = nx_edge
# todo: fix sorted list keyed on prefix :-))
metadata_collection = defaultdict(list)
for vlan_id, edges in nx_metadata['metadata'].iteritems():
for edge in edges:
metadata_collection[vlan_id].append(edge.to_json())
for key, value in metadata_collection.iteritems():
value = sorted(value, key=operator.itemgetter('prefix'))
json = {
'source': unicode(source.id),
'target': unicode(target.id),
'edges': metadata_collection
}
if NETMAP_CONFIG.getboolean('API_DEBUG'):
json.update({
'd_source_sysname': unicode(source.sysname),
'd_target_sysname': unicode(target.sysname),
})
return json
def edge_metadata_layer3(nx_edge, source, target, traffic):
"""
:param nx_edge tuple containing source and target
(nav.models.manage.Netbox or nav.netmap.stubs.Netbox)
:param source nav.models.manage.GwPortPrefix
:param target nav.models.manage.GwPortPrefix
:param prefixes list of prefixes (Prefix)
:returns metadata to attach to netmap graph
"""
# Note about GwPortPrefix and L3 graph: We always have interface.netbox
# avaiable under L3 topology graph due to stubbing Netboxes etc for
# elinks.
edge = Edge((nx_edge), source, target, traffic)
return edge
#return metadata
def edge_metadata_layer2(nx_edge, source, target, vlans_by_interface, traffic):
"""
Adds edge meta data with python types for given edge (layer2)
:param nx_edge tuple containing source and target
(nav.models.manage.Netbox or nav.netmap.stubs.Netbox)
:param source nav.models.manage.Interface (from port_pairs nx metadata)
:param target nav.models.manage.Interface (from port_pairs nx metadata)
:param vlans_by_interface VLAN dict access for fetching SwPortVlan list
:returns metadata to attach to netmap graph as metadata.
"""
edge = Edge(nx_edge, source, target, traffic)
source_vlans = target_vlans = []
if vlans_by_interface and source in vlans_by_interface:
source_vlans = tuple(vlans_by_interface.get(source))
if vlans_by_interface and target in vlans_by_interface:
target_vlans = tuple(vlans_by_interface.get(target))
#key=lambda x: x.vlan.vlan)
edge.source.vlans = set(source_vlans) - set(target_vlans)
edge.target.vlans = set(target_vlans) - set(source_vlans)
edge.vlans = set(source_vlans) | set(target_vlans)
return edge
| gpl-2.0 | 5,255,806,333,531,376,000 | 35.623679 | 80 | 0.592334 | false | 3.995157 | true | false | false |
xalt/xalt | old/altd/sbin/setDb.py | 1 | 3753 | #!/usr/bin/python -E
import sys,os
import MySQLdb
import ConfigParser
import base64
import time
import getpass
def readFromUser():
global HOST,USER,PASSWD,DB
HOST=raw_input("Database host:")
USER=raw_input("Database user:")
PASSWD=getpass.getpass("Database pass:")
DB=raw_input("Database name:")
def readConfig():
try:
global HOST,USER,PASSWD,DB
config=ConfigParser.ConfigParser()
config.read("altd_db.conf")
HOST=config.get("MYSQL","HOST")
USER=config.get("MYSQL","USER")
PASSWD=base64.b64decode(config.get("MYSQL","PASSWD"))
DB=config.get("MYSQL","DB")
except ConfigParser.NoOptionError as err:
sys.stderr.write("\nCannot parse the config file\n")
sys.stderr.write("Switch to user input mode...\n\n")
readFromUser()
def writeConfig():
config=ConfigParser.ConfigParser()
config.add_section("MYSQL")
config.set("MYSQL","HOST",HOST)
config.set("MYSQL","USER",USER)
config.set("MYSQL","PASSWD",base64.b64encode(PASSWD))
config.set("MYSQL","DB",DB)
t=time.strftime("%m%d%H%M%Y")
f=open('altd_db.'+t,'w')
config.write(f)
f.close()
os.chmod('altd_db.'+t,0640)
if(os.path.exists('altd_db.conf')):
os.remove('altd_db.conf')
os.symlink('altd_db.'+t,"altd_db.conf")
if(os.path.exists('altd_db.conf')):
print "ALTD database configuration file exists!"
q=raw_input("Do you want to use the file to fill database information?[y/n]")
if(q.lower() == "y"):
readConfig()
else:
readFromUser()
else:
readFromUser()
MACHINE=raw_input("Machine name:")
# connect to the MySQL server
try:
conn = MySQLdb.connect (HOST,USER,PASSWD)
except MySQLdb.Error as e:
print "Error %d: %s" % (e.args[0], e.args[1])
sys.exit (1)
# create database and related tables
try:
cursor = conn.cursor()
# If MySQL version < 4.1, comment out the line below
cursor.execute("SET SQL_MODE=\"NO_AUTO_VALUE_ON_ZERO\"")
# If the database does not exist, create it, otherwise, switch to the database.
cursor.execute("CREATE DATABASE IF NOT EXISTS %s DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci" % DB)
cursor.execute("USE "+DB)
# Table structure for table `altd_<MACHINE>_jobs`
cursor.execute("""
CREATE TABLE `altd_%s_jobs` (
`run_inc` int(11) NOT NULL auto_increment,
`tag_id` int(11) NOT NULL,
`executable` varchar(1024) NOT NULL,
`username` varchar(64) NOT NULL,
`run_date` date NOT NULL,
`job_launch_id` int(11) NOT NULL,
`build_machine` varchar(64) NOT NULL,
PRIMARY KEY (`run_inc`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 AUTO_INCREMENT=1
""" % MACHINE)
# Table structure for table `altd_<MACHINE>_link_tags`
cursor.execute("""
CREATE TABLE `altd_%s_link_tags` (
`tag_id` int(11) NOT NULL auto_increment,
`linkline_id` int(11) NOT NULL,
`username` varchar(64) NOT NULL,
`exit_code` tinyint(4) NOT NULL,
`link_date` date NOT NULL,
PRIMARY KEY (`tag_id`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 AUTO_INCREMENT=1
""" % MACHINE)
# Table structure for table `altd_<MACHINE>_linkline`
cursor.execute("""
CREATE TABLE `altd_%s_linkline` (
`linking_inc` int(11) NOT NULL auto_increment,
`linkline` varchar(4096) NOT NULL,
PRIMARY KEY (`linking_inc`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 AUTO_INCREMENT=1
""" % MACHINE)
cursor.close()
writeConfig()
except MySQLdb.Error as e:
print "Error %d: %s" % (e.args[0], e.args[1])
sys.exit (1)
| lgpl-2.1 | 1,474,715,430,726,963,000 | 30.016529 | 110 | 0.612843 | false | 3.318302 | true | false | false |
andrewcurtis/SSVMetric | src/PatternEvaluator.py | 1 | 9126 | # imports
import numpy as np
import scipy
import scipy.sparse.linalg
from scipy.sparse.linalg import ArpackNoConvergence
from scipy.sparse.linalg import ArpackError
import time
from SamplingPattern import SamplingPattern
from defaults import BASE_N
from utils import ft, ift, ft2, ift2, sumsq
class PatternEvaluator(object):
"""
PatternEvaluator
Co-ordinates computation of max and min singular values associated with
a given SamplingPattern of k-space sample loci.
"""
def __init__(self, base_sz = BASE_N, sens=[], max_tries=2):
super(PatternEvaluator, self).__init__()
# Base size
self.base_sz = base_sz
# SamplingPattern instance we want to test.
self.pattern = None
# init kernel for (optional) regularization
# regl'n not yet implemented
self.init_kern18()
# space for the vectors we need
self.xnew = np.zeros((self.base_sz, self.base_sz), dtype='complex')
self.xm = np.zeros((self.base_sz, self.base_sz), dtype='complex')
# actual array of sampling loci.
self.sampling = np.zeros((self.base_sz, self.base_sz), dtype='float')
# max repeats in case of arpack numerical problems
self.max_tries = max_tries
if sens:
self.sens = sens
def init_kern18(self):
"""
optimized sqrt(18) radius kernel for
spatial regularization filter
"""
self.root18 = np.zeros(32)
self.root18[1] = 0.04071725
self.root18[2] = 0.03499660
self.root18[4] = 0.02368359
self.root18[5] = 0.02522255
self.root18[8] = 0.02024067
self.root18[9] = 0.01407202
self.root18[10] = 0.01345276
self.root18[13] = 0.00850939
self.root18[16] = 0.00812839
self.root18[17] = 0.00491274
self.root18[18] = 0.00396661
def set_single_coil(self):
"""
set sensitivity to single uniform coil (e.g. test sampling
w/o sense )
"""
self.sens = np.ones([1, self.base_sz, self.base_sz], dtype='complex')
self.n_coils = self.sens.shape[0]
ss = sumsq(self.sens)
self.mask = np.ones([ self.base_sz, self.base_sz])>0
self.mask_sz = np.sum(self.mask.ravel())
def load_sens(self, fname, mask_eps=1e-6):
"""
load coil sensitivity and masking info from file.
Warning: assumes data size is (n_coils, nx, ny)
Looking for numpy npz file with variable 'sens'
Mask from sqrt-sum-of-squares of coil maps.
"""
fdat = np.load(fname)
#except error
self.sens = fdat['sens'].copy()
self.n_coils = self.sens.shape[0]
ss = sumsq(self.sens)
self.mask = ss > mask_eps
self.mask_sz = np.sum(self.mask.ravel())
#normalize coil maps
self.sens[:,self.mask] /= ss[self.mask]
def set_norm_fac(self, p):
"""
Adjust normalization factor. Used for testing overall
scaling behaviour of the system.
Use n_coils.
"""
if hasattr(p, 'norm_fac') and p.norm_fac > 0:
print 'Using pattern normfac of {}'.format(p.norm_fac)
self.norm_fac = p.norm_fac
else:
self.norm_fac = self.n_coils
print 'Using normfac of {}'.format(self.norm_fac)
def eval_pattern(self, pat):
"""
Main driver routine.
"""
self.pattern = pat
self.sampling = pat.sampling.copy().astype('float')
self.set_norm_fac(pat)
self.solve_high()
self.solve_low()
self.pattern.calcd = True
print pat.hi_eigs
print pat.low_eigs
def solve_high(self):
"""
co-ordinate calling ARPACK with our linear operator and get largest eigs
"""
t_start = time.time()
sysA = scipy.sparse.linalg.LinearOperator(
(self.mask_sz, self.mask_sz),
matvec=self.calc_AtA,
dtype='complex')
solved = False
for j in range(self.max_tries):
try:
a1,v1 = scipy.sparse.linalg.eigsh(
sysA,
k=self.pattern.n_eigs,
which='LM',
maxiter=self.pattern.iter_max,
tol=self.pattern.hitol,
ncv=self.pattern.ncv,
return_eigenvectors=True)
# sometimes it "solves" but with crazy errors ~1e+_300
if np.any(np.abs(a1) > self.n_coils):
continue
else:
solved = True
break
except ArpackError as e:
print e
if e.info == -8:
print('error on try {}'.format(j))
t_end = time.time()
print "Elapased: {}s".format(t_end - t_start)
self.pattern.hi_eigs = a1
def solve_low(self):
t_start = time.time()
sysA = scipy.sparse.linalg.LinearOperator(
(self.mask_sz, self.mask_sz),
matvec=self.calc_AtA,
dtype='complex')
solved = False
for j in range(self.max_tries):
try:
adyn,vdyn = scipy.sparse.linalg.eigsh(
sysA,
k=self.pattern.n_eigs,
which='SM',
maxiter=self.pattern.iter_max,
tol=self.pattern.tol,
ncv=self.pattern.ncv,
return_eigenvectors=True)
# sometimes it "solves" but with awful numerical problems
# this seems to be a function of a bad input vector, and typically
# is resolved by just running again. if we re-implement arpack
# we could probably find out why, but until then, we just check for
# strange values and re-compute.
if np.any(np.abs(adyn) > 1e3): # much bigger than nCoils ever will be
continue
else:
solved = True
break
except ArpackError as e:
print('Arpack error in solve_low {}'.format(e))
t_end = time.time()
print "Elapased: {}s".format(t_end - t_start)
self.pattern.low_eigs = adyn
if not solved:
self.pattern.low_eigs = -1
def calc_AtA(self, x0):
"""
calculate system matrix (normal equations)
"""
nSamp = np.sum(self.sampling)
maskSz = np.sum(self.mask)
nCoils, nv, npts = self.sens.shape
if x0.dtype <> np.complex128:
x0 = x0.astype('complex128')
x_img = x0
result = np.zeros(maskSz, dtype='complex')
# Compute A
A_back = sys_sense(x_img, self.sens, self.sampling>0, self.mask)
result[:] = A_back[:] / self.norm_fac #copy / flatten
return result
## --
# Rountines for the system matrix are below.
# To speed things up, we implement these python prototypes in C
#
# Note: fun testing w/ auto-jitting does little here.
#
# Interleaving of the FFT's and dot products are the main slowdown.
# Interestingly, python's default fftpack doesn't do a stellar job
# if we pass in a 3D array and ask for the 2D FT... We can look to move
# to a fftw wrapper in future.
#
# Instead, we overload PatternEvaluator.calc_AtA() to call some
# C functions via the CFFI that do fast dots and call FFTW.
# Its a bit messier for distribution since it requries compilation.
def sys_sense(im_mask, coils, pattern, mask):
"""
linear system for sense imaging
input 1d vector to iterator on (from arpack)
- insert into 2d image mask
- compute 2d FT's and dots with sens
- sample k space
- inverse
- extract
"""
nCoils, nv, npts = coils.shape
#print coils.shape
#print data.shape
image = np.zeros((nv, npts), dtype='complex128')
image[mask] = im_mask
nD = image.ndim
accum = 0.0
tmpGrad = []
zeroPat = pattern<1
gradient = np.zeros_like(im_mask)
ft_scale = 1.0/np.sqrt(nv*npts)
#compute one coil at a time to save working memory space
for c in range(nCoils):
coilPtr = coils[c,...]
# todo: zeropad
scratch = (coilPtr) * image
scratch = ift2(scratch)
# zero out non-sampled locations
scratch[zeroPat]=0
# ft back
scratch = ft2(scratch)
# todo: crop
scratch = np.conj(coilPtr) * scratch
# accumulate
gradient = gradient + scratch[mask]
gout = (gradient)
gout.shape = (-1)
return gout
| mit | -3,858,472,168,336,106,000 | 27.698113 | 86 | 0.53364 | false | 3.810438 | false | false | false |
iyer-arvind/PyFR | pyfr/backends/openmp/generator.py | 3 | 7202 | # -*- coding: utf-8 -*-
import re
from pyfr.backends.base.generator import BaseKernelGenerator
from pyfr.util import ndrange
class OpenMPKernelGenerator(BaseKernelGenerator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Specialise
self._dims = ['_nx'] if self.ndim == 1 else ['_ny', '_nx']
def render(self):
# Kernel spec
spec = self._emit_spec()
if self.ndim == 1:
body = self._emit_body_1d()
return '''
{spec}
{{
#pragma omp parallel
{{
int align = PYFR_ALIGN_BYTES / sizeof(fpdtype_t);
int cb, ce;
loop_sched_1d(_nx, align, &cb, &ce);
for (int _x = cb; _x < ce; _x++)
{{
{body}
}}
}}
}}'''.format(spec=spec, body=body)
else:
innerfn = self._emit_inner_func()
innercall = self._emit_inner_call()
return '''{innerfn}
{spec}
{{
#pragma omp parallel
{{
int align = PYFR_ALIGN_BYTES / sizeof(fpdtype_t);
int rb, re, cb, ce;
loop_sched_2d(_ny, _nx, align, &rb, &re, &cb, &ce);
for (int _y = rb; _y < re; _y++)
{{
{innercall}
}}
}}
}}'''.format(innerfn=innerfn, spec=spec,
innercall=innercall)
def _emit_inner_func(self):
# Get the specification and body
spec = self._emit_inner_spec()
body = self._emit_body_2d()
# Combine
return '''{spec}
{{
for (int _x = 0; _x < _nx; _x++)
{{
{body}
}}
}}'''.format(spec=spec, body=body)
def _emit_inner_call(self):
# Arguments for the inner function
iargs = ['ce - cb']
iargs.extend(sa.name for sa in self.scalargs)
for va in self.vectargs:
iargs.extend(self._offset_arg_array_2d(va))
return '{0}_inner({1});'.format(self.name, ', '.join(iargs))
def _emit_inner_spec(self):
# Inner dimension
ikargs = ['int _nx']
# Add any scalar arguments
ikargs.extend('{0.dtype} {0.name}'.format(sa) for sa in self.scalargs)
# Vector arguments (always arrays as we're 2D)
for va in self.vectargs:
const = 'const' if va.intent == 'in' else ''
stmt = '{0} {1.dtype} *__restrict__ {1.name}_v'.format(const, va)
stmt = stmt.strip()
if va.ncdim == 0:
ikargs.append(stmt)
else:
for ij in ndrange(*va.cdims):
ikargs.append(stmt + 'v'.join(str(n) for n in ij))
return ('static PYFR_NOINLINE void {0}_inner({1})'
.format(self.name, ', '.join(ikargs)))
def _emit_spec(self):
# We first need the argument list; starting with the dimensions
kargs = ['int ' + d for d in self._dims]
# Now add any scalar arguments
kargs.extend('{0.dtype} {0.name}'.format(sa) for sa in self.scalargs)
# Finally, add the vector arguments
for va in self.vectargs:
# Views
if va.isview:
kargs.append('{0.dtype}* __restrict__ {0.name}_v'.format(va))
kargs.append('const int* __restrict__ {0.name}_vix'
.format(va))
if va.ncdim >= 1:
kargs.append('const int* __restrict__ {0.name}_vcstri'
.format(va))
if va.ncdim == 2:
kargs.append('const int* __restrict__ {0.name}_vrstri'
.format(va))
# Arrays
else:
# Intent in arguments should be marked constant
const = 'const' if va.intent == 'in' else ''
kargs.append('{0} {1.dtype}* __restrict__ {1.name}_v'
.format(const, va).strip())
# If we are a matrix (ndim = 2) or a non-MPI stacked
# vector then a leading (sub) dimension is required
if self.ndim == 2 or (va.ncdim > 0 and not va.ismpi):
kargs.append('int lsd{0.name}'.format(va))
return 'void {0}({1})'.format(self.name, ', '.join(kargs))
def _emit_body_1d(self):
body = self.body
ptns = [r'\b{0}\b', r'\b{0}\[(\d+)\]', r'\b{0}\[(\d+)\]\[(\d+)\]']
for va in self.vectargs:
# Dereference the argument
darg = self._deref_arg(va)
# Substitute
body = re.sub(ptns[va.ncdim].format(va.name), darg, body)
return body
def _emit_body_2d(self):
body = self.body
ptns = [r'\b{0}\b', r'\b{0}\[(\d+)\]', r'\b{0}\[(\d+)\]\[(\d+)\]']
subs = ['{0}_v[_x]', r'{0}_v\1[_x]', r'{0}_v\1v\2[_x]']
for va in self.vectargs:
body = re.sub(ptns[va.ncdim].format(va.name),
subs[va.ncdim].format(va.name), body)
return body
def _deref_arg(self, arg):
if arg.isview:
ptns = ['{0}_v[{0}_vix[_x]]',
r'{0}_v[{0}_vix[_x] + {0}_vcstri[_x]*\1]',
r'{0}_v[{0}_vix[_x] + {0}_vrstri[_x]*\1'
r' + {0}_vcstri[_x]*\2]']
return ptns[arg.ncdim].format(arg.name)
else:
# Leading (sub) dimension
lsdim = 'lsd' + arg.name if not arg.ismpi else '_nx'
# Vector name_v[_x]
if arg.ncdim == 0:
ix = '_x'
# Stacked vector; name_v[lsdim*\1 + _x]
elif arg.ncdim == 1:
ix = r'{0}*\1 + _x'.format(lsdim)
# Doubly stacked vector; name_v[lsdim*nv*\1 + lsdim*\2 + _x]
else:
ix = r'{0}*{1}*\1 + {0}*\2 + _x'.format(lsdim, arg.cdims[1])
return '{0}_v[{1}]'.format(arg.name, ix)
def _offset_arg_array_2d(self, arg):
stmts = []
# Matrix; name + _y*lsdim + cb
if arg.ncdim == 0:
stmts.append('{0}_v + _y*lsd{0} + cb'.format(arg.name))
# Stacked matrix; name + (_y*nv + <0>)*lsdim + cb
elif arg.ncdim == 1:
stmts.extend('{0}_v + (_y*{1} + {2})*lsd{0} + cb'
.format(arg.name, arg.cdims[0], i)
for i in range(arg.cdims[0]))
# Doubly stacked matrix; name + ((<0>*_ny + _y)*nv + <1>)*lsdim + cb
else:
stmts.extend('{0}_v + (({1}*_ny + _y)*{2} + {3})*lsd{0} + cb'
.format(arg.name, i, arg.cdims[1], j)
for i, j in ndrange(*arg.cdims))
return stmts
| bsd-3-clause | 6,854,901,861,240,827,000 | 34.653465 | 78 | 0.425993 | false | 3.583085 | false | false | false |
AerisCloud/AerisCloud | aeriscloud/log.py | 1 | 1415 | import logging
import sys
LOGGING_FORMAT = '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
FILE_LOGGING_FORMAT = '%(asctime)s,%(levelname)s,%(name)s,%(message)s'
# captureWarnings exists only since 2.7
_nh = None
if sys.hexversion > 0x2070000:
logging.captureWarnings(True)
_nh = logging.NullHandler()
else:
# doesn't exists in older versions
class NullHandler(logging.Handler):
def emit(self, record):
pass
_nh = NullHandler()
logging.root.name = 'aeriscloud'
_logger = logging.root
_logger.addHandler(_nh)
# disable all logs
_logger.setLevel(60)
# prevent root logger from outputting
_logger.propagate = False
def get_logger(name=None, parent=_logger):
if name:
if not hasattr(parent, 'getChild'):
return parent.manager.getLogger('.'.join([parent.name, name]))
return parent.getChild(name)
return parent
def set_log_level(lvl):
# custom stream handler by default
if _nh in _logger.handlers:
_logger.removeHandler(_nh)
_handler = logging.StreamHandler()
_handler.setFormatter(logging.Formatter(LOGGING_FORMAT))
_logger.addHandler(_handler)
_logger.setLevel(lvl)
def set_log_file(filename):
_logger.removeHandler(_nh)
_file_handler = logging.FileHandler(filename)
_file_handler.setFormatter(logging.Formatter(FILE_LOGGING_FORMAT))
_logger.addHandler(_file_handler)
| mit | -6,147,397,567,664,631,000 | 26.745098 | 74 | 0.683392 | false | 3.618926 | false | false | false |
wimmuskee/ontolex-db | database/mysql.py | 1 | 22736 | # -*- coding: utf-8 -*-
import pymysql.cursors
import uuid
from urllib.parse import urlparse
"""
Some explanation for this set of functions.
set: put a partical subset in a class var
insert: insert one row without checks and dependencies, optionally commit
store: insert multiples and calling separate insert functions, always commits and has optional safemode
check: returns True or False
get: get an individual value
todo, find and get
"""
class Database:
def __init__(self,config):
self.host = config["host"]
self.user = config["user"]
self.passwd = config["passwd"]
self.name = config["name"]
self.lexicalEntries = []
self.lexicalEntryRelations = []
self.lexicalForms = []
self.lexicalProperties = []
self.lexicalSenses = []
self.senseReferences = []
self.lexicalEntryComponents = []
self.components = []
self.lexicalEntryLabels = {}
self.lexicalSenseDefinitions = {}
self.posses = {}
self.languages = {}
self.properties = {}
self.senserelations = []
self.entryrelations = {}
def connect(self):
self.DB = pymysql.connect(host=self.host,user=self.user, passwd=self.passwd,db=self.name,charset='utf8',use_unicode=1,cursorclass=pymysql.cursors.DictCursor)
def setPosses(self):
for row in self.__getRows("SELECT * FROM partOfSpeechVocabulary"):
self.posses[row["value"]] = row["id"]
def setLanguages(self):
for row in self.__getRows("SELECT * FROM languageVocabulary"):
self.languages[row["iso_639_1"]] = row["id"]
def setProperties(self):
for row in self.__getRows("SELECT * FROM propertyVocabulary"):
key = self.__getUrlPart(row["property"]) + ":" + self.__getUrlPart(row["value"])
self.properties[key] = row["id"]
def setEntryRelations(self):
for row in self.__getRows("SELECT * FROM relationVocabulary"):
key = "lexinfo:" + self.__getUrlPart(row["relation"])
self.entryrelations[key] = row["relationID"]
def setSenseRelations(self):
""" Should not be manual, but for now there is validation. """
self.senserelations.extend( ["ontolex:reference"] )
self.senserelations.extend( ["lexinfo:antonym", "lexinfo:synonym", "lexinfo:pertainsTo", "lexinfo:relatedTerm", "lexinfo:hypernym"] )
def setLexicalEntries(self):
query = "SELECT lexicalEntryID, class, pos.value AS pos_value, lex.identifier AS lex_identifier FROM lexicalEntry AS lex \
LEFT JOIN partOfSpeechVocabulary AS pos ON lex.partOfSpeechID = pos.id"
self.lexicalEntries = self.__getRows(query)
def setLexicalEntry(self,lexicalEntryID):
query = "SELECT lexicalEntryID, class, pos.value AS pos_value, identifier AS lex_identifier FROM lexicalEntry AS lex \
LEFT JOIN partOfSpeechVocabulary AS pos ON lex.partOfSpeechID = pos.id \
WHERE lexicalEntryID = %s"
self.lexicalEntries = self.__getRows(query,(lexicalEntryID))
def setLexicalEntryRelations(self):
query = "SELECT lex.identifier AS lex_identifier, entryrel.reference, vocab.relation FROM lexicalEntryRelation AS entryrel \
LEFT JOIN lexicalEntry AS lex ON entryrel.lexicalEntryID = lex.lexicalEntryID \
LEFT JOIN relationVocabulary AS vocab ON entryrel.relationID = vocab.relationID"
self.lexicalEntryRelations = self.__getRows(query)
def setLexicalEntryRelationsByID(self,lexicalEntryID):
query = "SELECT lex.identifier AS lex_identifier, entryrel.reference, vocab.relation FROM lexicalEntryRelation AS entryrel \
LEFT JOIN lexicalEntry AS lex ON entryrel.lexicalEntryID = lex.lexicalEntryID \
LEFT JOIN relationVocabulary AS vocab ON entryrel.relationID = vocab.relationID \
WHERE entryrel.lexicalEntryID = %s"
self.lexicalEntryRelations = self.__getRows(query,(lexicalEntryID))
def setLexicalForms(self,lang_id):
query = "SELECT form.lexicalEntryID, form.lexicalFormID, type, rep.value AS rep_value, lex.identifier AS lex_identifier, form.identifier AS form_identifier, rep.syllableCount AS syllableCount FROM lexicalForm AS form \
LEFT JOIN lexicalEntry AS lex ON form.lexicalEntryID = lex.lexicalEntryID \
LEFT JOIN writtenRep AS rep ON form.lexicalFormID = rep.lexicalFormID \
WHERE rep.languageID = %s"
self.lexicalForms = self.__getRows(query,(lang_id))
def setLexicalForm(self,lexicalEntryID,lang_id):
query = "SELECT form.lexicalEntryID, form.lexicalFormID, type, rep.value AS rep_value, lex.identifier AS lex_identifier, form.identifier AS form_identifier, rep.syllableCount AS syllableCount FROM lexicalForm AS form \
LEFT JOIN lexicalEntry AS lex ON form.lexicalEntryID = lex.lexicalEntryID \
LEFT JOIN writtenRep AS rep ON form.lexicalFormID = rep.lexicalFormID \
WHERE form.lexicalEntryID = %s \
AND rep.languageID = %s"
self.lexicalForms.extend(self.__getRows(query,(lexicalEntryID,lang_id)))
def setLexicalFormsByEntries(self,lang_id):
for entry in self.lexicalEntries:
self.setLexicalForm(entry["lexicalEntryID"],lang_id)
def setLexicalEntryLabels(self):
""" Sets easy lookup labels for use in setLexicalSenses without needing big joins.
Called seperately because setLexicalFormsByEntries calls setLexicalForm. """
for form in self.lexicalForms:
if form["type"] == "canonicalForm":
self.lexicalEntryLabels[form["lexicalEntryID"]] = form["rep_value"]
def setLexicalFormProperties(self):
query = "SELECT form.identifier AS form_identifier, vocab.property, vocab.value FROM formProperties AS formprop \
LEFT JOIN lexicalForm AS form ON formprop.lexicalFormID = form.lexicalFormID \
LEFT JOIN propertyVocabulary AS vocab ON formprop.propertyID = vocab.id"
self.lexicalProperties = self.__getRows(query)
def setLexicalFormPropertiesByID(self):
for form in self.lexicalForms:
query = "SELECT form.identifier AS form_identifier, vocab.property, vocab.value FROM formProperties AS formprop \
LEFT JOIN lexicalForm AS form ON formprop.lexicalFormID = form.lexicalFormID \
LEFT JOIN propertyVocabulary AS vocab ON formprop.propertyID = vocab.id \
WHERE formprop.lexicalFormID = %s"
self.lexicalProperties.extend(self.__getRows(query,(form["lexicalFormID"])))
def setLexicalSenses(self):
query = "SELECT sense.lexicalSenseID, sense.lexicalEntryID, lex.identifier AS lex_identifier, sense.identifier AS sense_identifier FROM lexicalSense AS sense \
LEFT JOIN lexicalEntry AS lex ON sense.lexicalEntryID = lex.lexicalEntryID"
self.lexicalSenses = self.__getRows(query)
def setLexicalSensesByID(self,lexicalEntryID):
query = "SELECT sense.lexicalSenseID, sense.lexicalEntryID, lex.identifier AS lex_identifier, sense.identifier AS sense_identifier FROM lexicalSense AS sense \
LEFT JOIN lexicalEntry AS lex ON sense.lexicalEntryID = lex.lexicalEntryID \
WHERE sense.lexicalEntryID = %s"
self.lexicalSenses.extend(self.__getRows(query,(lexicalEntryID)))
def setLexicalSensesByEntries(self):
for entry in self.lexicalEntries:
self.setLexicalSensesByID(entry["lexicalEntryID"])
def setSenseDefinitions(self,lang_id):
""" Definition is optional."""
for sense in self.lexicalSenses:
query = "SELECT value FROM senseDefinition WHERE lexicalSenseID = %s AND languageID = %s"
row = self.__getRow(query,(sense["lexicalSenseID"],lang_id))
if row:
self.lexicalSenseDefinitions[sense["sense_identifier"]] = row["value"]
def setSenseReferences(self):
query = "SELECT sense.identifier AS sense_identifier, namespace, property, reference FROM senseReference \
LEFT JOIN lexicalSense AS sense ON senseReference.lexicalSenseID = sense.lexicalSenseID"
self.senseReferences = self.__getRows(query)
def setSenseReferencesByID(self):
for sense in self.lexicalSenses:
query = "SELECT sense.identifier AS sense_identifier, namespace, property, reference FROM senseReference \
LEFT JOIN lexicalSense AS sense ON senseReference.lexicalSenseID = sense.lexicalSenseID \
WHERE senseReference.lexicalSenseID = %s"
self.senseReferences.extend(self.__getRows(query,(sense["lexicalSenseID"])))
def setLexicalComponents(self):
query = "SELECT lex.identifier AS lex_identifier, comp.identifier AS comp_identifier, position FROM lexicalEntryComponent AS lexcomp \
LEFT JOIN lexicalEntry AS lex ON lexcomp.lexicalEntryID = lex.lexicalEntryID \
LEFT JOIN component AS comp ON lexcomp.componentID = comp.componentID"
self.lexicalEntryComponents.extend(self.__getRows(query))
def setLexicalComponentsByID(self,lexicalEntryID):
query = "SELECT lex.identifier AS lex_identifier, comp.identifier AS comp_identifier, position FROM lexicalEntryComponent AS lexcomp \
LEFT JOIN lexicalEntry AS lex ON lexcomp.lexicalEntryID = lex.lexicalEntryID \
LEFT JOIN component AS comp ON lexcomp.componentID = comp.componentID \
WHERE lexcomp.lexicalEntryID = %s"
self.lexicalEntryComponents.extend(self.__getRows(query,(lexicalEntryID)))
def setComponents(self):
query = "SELECT DISTINCT comp.identifier AS comp_identifier, lex.identifier AS lex_identifier, form.identifier AS form_identifier FROM component AS comp \
LEFT JOIN lexicalEntry AS lex ON comp.lexicalEntryID = lex.lexicalEntryID \
LEFT JOIN lexicalForm AS form ON comp.lexicalFormID = form.lexicalFormID"
self.components.extend(self.__getRows(query))
def setComponentsByID(self,lexicalEntryID,lang_id):
query = "SELECT DISTINCT comp.identifier AS comp_identifier, lex.identifier AS lex_identifier, form.identifier AS form_identifier, rep.value AS rep_value FROM component AS comp \
LEFT JOIN lexicalEntryComponent AS lexcomp ON comp.componentID = lexcomp.componentID \
LEFT JOIN lexicalEntry AS lex ON comp.lexicalEntryID = lex.lexicalEntryID \
LEFT JOIN lexicalForm AS form ON comp.lexicalFormID = form.lexicalFormID \
LEFT JOIN writtenRep AS rep ON form.lexicalFormID = rep.lexicalFormID \
WHERE lexcomp.lexicalEntryID = %s \
AND rep.languageID = %s"
self.components.extend(self.__getRows(query,(lexicalEntryID,lang_id)))
# and add single component to output (but not connected to actual component part of lexicalEntry
# useful for management, and checking loose components
if not self.components:
query = "SELECT DISTINCT comp.identifier AS comp_identifier, lex.identifier AS lex_identifier, form.identifier AS form_identifier, rep.value AS rep_value FROM component AS comp \
LEFT JOIN lexicalEntry AS lex ON comp.lexicalEntryID = lex.lexicalEntryID \
LEFT JOIN lexicalForm AS form ON comp.lexicalFormID = form.lexicalFormID \
LEFT JOIN writtenRep AS rep ON form.lexicalFormID = rep.lexicalFormID \
WHERE lex.lexicalEntryID = %s \
AND rep.languageID = %s"
self.components.extend(self.__getRows(query,(lexicalEntryID,lang_id)))
def saveVerbPastSingular(self,lexicalEntryID,value,lang_id):
lex_id = self.getID(lexicalEntryID,"lexicalEntry")
form_id = self.storeOtherForm(lex_id,value,lang_id)
self.insertFormProperty(form_id,self.properties["tense:past"],True)
self.insertFormProperty(form_id,self.properties["number:singular"],True)
def saveVerbPastParticiple(self,lexicalEntryID,value,lang_id):
lex_id = self.getID(lexicalEntryID,"lexicalEntry")
# store with safemode False
form_id = self.storeOtherForm(lex_id,value,lang_id,False)
self.insertFormProperty(form_id,self.properties["tense:past"],True)
self.insertFormProperty(form_id,self.properties["verbFormMood:participle"],True)
def getLexicalEntryID(self,value,partOfSpeechID):
query = "SELECT lexicalEntryID FROM lexicalEntry WHERE value = %s AND partOfSpeechID = %s"
row = self.__getRow(query,(value,partOfSpeechID))
return row["lexicalEntryID"]
def getLexicalSenseID(self,lexicalEntryID):
query = "SELECT lexicalSenseID FROM lexicalSense WHERE lexicalEntryID = %s"
row = self.__getRow(query,(lexicalEntryID))
return row["lexicalSenseID"]
def getID(self,identifier,table):
""" Return the real database ID from either entry, form or sense, based on identifier. """
field = table + "ID"
query = "SELECT " + field + " FROM " + table + " WHERE identifier = %s"
row = self.__getRow(query,(identifier))
return row[field]
def getIdentifier(self,id,table):
""" Return the identifier from either entry, form or sense, based on the real DB id. """
field = table + "ID"
query = "SELECT identifier FROM " + table + " WHERE " + field + " = %s"
row = self.__getRow(query,(id))
return row["identifier"]
def getCountlexicalSenses(self,lexicalEntryID):
query = "SELECT count(*) AS count FROM lexicalSense WHERE lexicalEntryID = %s"
row = self.__getRow(query,(lexicalEntryID))
return int(row["count"])
def getWrittenRepsWithoutSyllableCount(self,lang_id):
query = "SELECT DISTINCT value FROM writtenRep WHERE syllableCount IS NULL AND languageID = %s"
return self.__getRows(query,(lang_id))
def checkSenseReferenceExists(self,lexicalSenseID,relation,reference):
namespace = relation.split(":")[0]
property = relation.split(":")[1]
query = "SELECT * FROM senseReference WHERE lexicalSenseID = %s AND namespace = %s AND property = %s AND reference = %s"
row = self.__getRow(query,(lexicalSenseID,namespace,property,reference))
if row:
return True
else:
return False
def checkLexicalEntryRelationExists(self,lexicalEntryID,relation,reference):
query = "SELECT * FROM lexicalEntryRelation WHERE lexicalEntryID = %s AND relationID = %s AND reference = %s"
row = self.__getRow(query,(lexicalEntryID,relation,reference))
if row:
return True
else:
return False
def checkLexicalFormPropertyExists(self,lexicalFormID,propertyID):
query = "SELECT * FROM formProperties WHERE lexicalFormID = %s AND propertyID = %s"
row = self.__getRow(query,(lexicalFormID,propertyID))
if row:
return True
else:
return False
def storeCanonical(self,word,lang_id,pos_id,safemode=True):
""" Stores new lexicalEntry and canonicalForm if entry does not exist."""
if self.findLexicalEntry(word,pos_id) and safemode:
print("found this entry already: " + word)
return None
lexicalEntryID = self.insertLexicalEntry(word,pos_id)
lexicalFormID = self.insertLexicalForm(lexicalEntryID,"canonicalForm")
self.insertWrittenRep(lexicalFormID,word,lang_id)
# store infinitive form for verb
if pos_id == self.posses["verb"]:
self.insertFormProperty(lexicalFormID,self.properties["verbFormMood:infinitive"])
self.DB.commit()
return lexicalEntryID
def storeOtherForm(self,lexicalEntryID,word,lang_id,safemode=True):
if self.findlexicalForm(lexicalEntryID,word,lang_id) and safemode:
print("found this form already: " + word)
return None
lexicalFormID = self.insertLexicalForm(lexicalEntryID,"otherForm")
self.insertWrittenRep(lexicalFormID,word,lang_id)
self.DB.commit()
return lexicalFormID
def storeFormProperties(self,lexicalFormID,properties,safemode=True):
# no safemode yet
for property in properties:
# p in form <property>:<value>
self.insertFormProperty(lexicalFormID,self.properties[property])
self.DB.commit()
def storeLexicalSense(self,lexicalEntryID,relation,reference,safemode=True):
""" Adds lexicalSense to lexicxalEntry, and adds a relation. """
senseCount = self.getCountlexicalSenses(lexicalEntryID)
if senseCount == 0:
# no senses yet, we can safely add a sense and a relation
lexicalSenseID = self.insertLexicalSense(lexicalEntryID)
self.insertSenseReference(lexicalSenseID,relation,reference)
elif senseCount == 1:
# asume we're adding to this sense, retrieve the senseID and add reference if not exists
lexicalSenseID = self.getLexicalSenseID(lexicalEntryID)
if not self.checkSenseReferenceExists(lexicalSenseID,relation,reference):
self.insertSenseReference(lexicalSenseID,relation,reference)
else:
lexicalSenseID = None
self.DB.commit()
return lexicalSenseID
def storeLexicalEntryRelation(self,lexicalEntryID,relation,reference):
""" Check whether relation already exists, and if not, adds it. """
if not self.checkLexicalEntryRelationExists(lexicalEntryID,relation,reference):
self.insertLexicalEntryRelation(lexicalEntryID,relation,reference)
self.DB.commit()
def storeComponent(self,lexicalFormID):
""" Stores component, based on lexicalFormID. """
query = "SELECT lexicalEntryID FROM lexicalForm WHERE lexicalFormID = %s"
row = self.__getRow(query,(lexicalFormID))
if row:
return self.insertComponent(row["lexicalEntryID"],lexicalFormID,True)
else:
return "failed"
def findLexicalEntry(self,word,pos_id):
query = "SELECT lexicalEntryID FROM lexicalEntry WHERE value = %s AND partOfSpeechID = %s"
row = self.__getRow(query,(word,pos_id))
if row:
return row["lexicalEntryID"]
else:
return None
def findlexicalForm(self,lexicalEntryID,word,lang_id):
query = "SELECT form.lexicalFormID FROM lexicalForm AS form \
LEFT JOIN writtenRep AS rep ON form.lexicalFormID = rep.lexicalFormID \
WHERE form.lexicalEntryID = %s \
AND rep.value = %s \
AND rep.languageID = %s"
row = self.__getRow(query,(lexicalEntryID,word,lang_id))
if row:
return row["lexicalFormID"]
else:
return None
def insertLexicalEntry(self,word,pos_id,commit=False):
c = self.DB.cursor()
entryclass = "Word"
if word.count(" ") > 0:
entryclass = "MultiwordExpression"
identifier = "urn:uuid:" + str(uuid.uuid4())
query = "INSERT INTO lexicalEntry (value,identifier,partOfSpeechID,class) VALUES (%s,%s,%s,%s)"
c.execute(query, (word,identifier,pos_id,entryclass))
lexicalEntryID = c.lastrowid
c.close()
if commit:
self.DB.commit()
return lexicalEntryID
def insertLexicalEntryRelation(self,lexicalEntryID,relationID,reference,commit=False):
c = self.DB.cursor()
query = "INSERT INTO lexicalEntryRelation (lexicalEntryID,relationID,reference) VALUES (%s,%s,%s)"
c.execute(query, (lexicalEntryID,relationID,reference))
c.close()
if commit:
self.DB.commit()
def insertLexicalForm(self,lexicalEntryID,type,commit=False):
c = self.DB.cursor()
identifier = "urn:uuid:" + str(uuid.uuid4())
query = "INSERT INTO lexicalForm (lexicalEntryID,identifier,type) VALUES (%s,%s,%s)"
c.execute(query, (lexicalEntryID,identifier,type))
lexicalFormID = c.lastrowid
c.close()
if commit:
self.DB.commit()
return lexicalFormID
def insertWrittenRep(self,lexicalFormID,word,lang_id,commit=False):
c = self.DB.cursor()
query = "INSERT INTO writtenRep (lexicalFormID,languageID,value) VALUES (%s,%s,%s)"
c.execute(query, (lexicalFormID,lang_id,word))
c.close()
if commit:
self.DB.commit()
def insertFormProperty(self,lexicalFormID,propertyID,commit=False):
if self.checkLexicalFormPropertyExists(lexicalFormID,propertyID):
return
c = self.DB.cursor()
query = "INSERT INTO formProperties (lexicalFormID,propertyID) VALUES (%s,%s)"
c.execute(query, (lexicalFormID,propertyID))
c.close()
if commit:
self.DB.commit()
def insertLexicalSense(self,lexicalEntryID,commit=False):
""" Insert lexicalSense, and optionally commit."""
c = self.DB.cursor()
identifier = "urn:uuid:" + str(uuid.uuid4())
query = "INSERT INTO lexicalSense (lexicalEntryID,identifier) VALUES (%s,%s)"
c.execute(query, (lexicalEntryID,identifier))
lexicalSenseID = c.lastrowid
c.close()
if commit:
self.DB.commit()
return lexicalSenseID
def insertLexicalSenseDefinition(self,lexicalSenseID,languageID,definition,commit=False):
c = self.DB.cursor()
query = "INSERT INTO senseDefinition (lexicalSenseID,languageID,value) VALUES (%s,%s,%s)"
c.execute(query, (lexicalSenseID,languageID,definition))
c.close()
if commit:
self.DB.commit()
def insertSenseReference(self,lexicalSenseID,relation,reference,commit=False):
c = self.DB.cursor()
namespace = relation.split(":")[0]
property = relation.split(":")[1]
query = "INSERT INTO senseReference (lexicalSenseID,namespace,property,reference) VALUES (%s,%s,%s,%s)"
c.execute(query, (lexicalSenseID,namespace,property,reference))
c.close()
if commit:
self.DB.commit()
def insertComponent(self,lexicalEntryID,lexicalFormID,commit=False):
c = self.DB.cursor()
# we should have a checkExists for this
query = "SELECT componentID FROM component WHERE lexicalEntryID = %s AND lexicalFormID = %s"
c.execute(query,(lexicalEntryID,lexicalFormID))
row = c.fetchone()
if row:
return row["componentID"]
else:
identifier = "urn:uuid:" + str(uuid.uuid4())
query = "INSERT INTO component (identifier,lexicalEntryID,lexicalFormID) VALUES (%s,%s,%s)"
c.execute(query,(identifier,lexicalEntryID,lexicalFormID))
componentID = c.lastrowid
c.close()
if commit:
self.DB.commit()
return componentID
def insertLexicalEntryComponent(self,lexicalEntryID,componentID,position,commit=False):
c = self.DB.cursor()
# more another checkExists, where nothing is returned
query = "SELECT * FROM lexicalEntryComponent WHERE lexicalEntryID = %s AND componentID = %s AND position = %s"
c.execute(query,(lexicalEntryID,componentID,position))
row = c.fetchone()
if not row:
query = "INSERT INTO lexicalEntryComponent (lexicalEntryID,componentID,position) VALUES (%s,%s,%s)"
c.execute(query,(lexicalEntryID,componentID,position))
c.close()
if commit:
self.DB.commit()
def updateLexicalEntryValue(self,lexicalEntryID,label,languageID):
c = self.DB.cursor()
# find canonicalForm
query = "SELECT * FROM lexicalForm WHERE lexicalEntryID = %s AND type = 'canonicalForm'"
c.execute(query, (lexicalEntryID))
canonicalform = c.fetchone()
# update entry and writtenrep
query = "UPDATE lexicalEntry SET value = %s WHERE lexicalEntryID = %s"
c.execute(query, (label,lexicalEntryID))
query = "UPDATE writtenRep SET value = %s WHERE lexicalFormID = %s AND languageID = %s"
c.execute(query,(label,canonicalform["lexicalFormID"],languageID))
c.close()
self.DB.commit()
def updateLexicalEntryPOS(self,lexicalEntryID,partOfSpeechID):
c = self.DB.cursor()
query = "UPDATE lexicalEntry SET partOfSpeechID = %s WHERE lexicalEntryID = %s"
c.execute(query, (partOfSpeechID,lexicalEntryID))
c.close()
self.DB.commit()
def updateSyllableCount(self,value,syllableCount,languageID):
c = self.DB.cursor()
query = "UPDATE writtenRep SET syllableCount = %s WHERE value = %s AND languageID = %s"
c.execute(query,(syllableCount,value,languageID))
c.close()
self.DB.commit()
def __getRow(self,query,args=None):
c = self.DB.cursor()
c.execute(query,args)
row = c.fetchone()
c.close()
return row
def __getRows(self,query,args=None):
c = self.DB.cursor()
c.execute(query,args)
rows = c.fetchall()
c.close()
return rows
def __getUrlPart(self,url):
""" Helper function to get the last part of the property url."""
parsed = urlparse(url)
if parsed.fragment:
return parsed.fragment
else:
return parsed.path.split('/')[-1]
| mit | -5,469,644,514,981,064,000 | 37.083752 | 220 | 0.747229 | false | 3.176306 | false | false | false |
senser/xmppBot | ZenPacks/community/xmppBot/Jabber/plugins/graph.py | 1 | 6750 | """Extract graphs."""
from Jabber.Plugins import Plugin
from Jabber.ZenAdapter import ZenAdapter
from Jabber.Options import Options
from optparse import OptionError
class Graph(Plugin):
name = 'graph'
capabilities = ['graph', 'help']
def call(self, args, log, **kw):
#Dirty hack to make it work with multiword options (they must be in '' or "")
i=-1
appnd=False
args1=[]
for arg in args:
if appnd:
args1[i]+=' '+arg.replace("'",'').replace('"','')
else:
i+=1
args1.append(arg.replace("'",'').replace('"',''))
if arg[0] in ('"', "'"): appnd=True
if arg[-1] in ('"', "'"): appnd=False
args=args1
log.debug('Graph extraction plugin running with arguments %s' % args)
opts = self.options()
adapter = ZenAdapter()
try:
(options, arguments) = opts.parse_args(args)
log.debug('Done parsing arguments. Options are "%s", arguments expanded to %s' % (options, arguments))
except OptionError, message:
return str(message)
if options.deviceName is None or (not options.list and options.graphName is None):
return 'NO. You must specify both device and graph with -d and -g.'
devices = adapter.devices(options.deviceName)
if len(devices) == 0:
return 'Cannot find a device, ip or mac for "%s"' % options.deviceName
log.debug('Found %d devices matching %s' % (len(devices), devices))
if options.list:
message=''
if options.subComponent:
for device in devices:
componentList = adapter.components(device, options.subComponent)
if componentList:
for component in componentList:
for validGraph in component.getDefaultGraphDefs():
message += validGraph['title'] + ' (' + component.absolute_url_path().split(device.id)[1][1:] + ')\n'
else:
for device in devices:
for validGraph in device.getDefaultGraphDefs():
message += validGraph['title'] + '\n'
return 'Valid graphs:\n' + message
log.debug('Going to look for graph %s' % options.graphName)
# rrdtool cannot accept arguments in unicode, so convert graphName to ascii first
message = self.obtainValues(adapter, devices, options.graphName.encode('ascii', 'ignore'), options.subComponent, log)
return message
def obtainValues(self, adapter, devices, graph, component, log):
import time
message = ''
log.debug('Have %d devices to check for %s' % (len(devices), graph))
for device in devices:
log.debug('Checking %s. For the graph %s' % (device.id, graph))
# try to get it directly from the device first.
if self.hasGraph(device, graph):
log.debug('The device %s does have the graph %s' % (device.id, graph))
message += '%s %s: %s\n' % (device.name(), graph, self.shorten(self.upload(self.render(device.getGraphDefUrl(graph)), device.name() + '/' + graph.replace(' ', '_') + '_' + time.strftime('%Y%m%d_%H%M%S',time.localtime()) +'.png')))
elif component is not None:
compList = adapter.components(device, component)
if not compList:
return 'Sorry. Cannot find a component %s on %s' % (component, device)
if len(compList)>1:
return 'Multiple components found. Please, define more exaclty.'
comp=compList[0]
log.debug('Looking for graph %s in component %s' % (graph, comp.name()))
if self.hasGraph(comp, graph):
message += '%s %s %s: %s\n' % (device.name(), component, graph, self.shorten(self.upload(self.render(comp.getGraphDefUrl(graph)), device.name() + comp.absolute_url_path()[comp.absolute_url_path().find(device.id)+len(device.id):] + '/' + graph.replace(' ', '_') +'_' + time.strftime('%Y%m%d_%H%M%S',time.localtime()) +'.png')))
else:
message += '%s %s: Does not have a graph named %s. Remember, spelling and case matter. Try -l for a list of graphs' % (device.name(), component, graph)
else:
message += '%s: Unable to find the graph %s. Remember, spelling and case matter. Try -l for a list of graphs' % (device.name(), graph)
return message
def hasGraph(self, entity, graph):
hasGr = False
for gr in entity.getDefaultGraphDefs():
if gr['title'] == graph:
hasGr = True
break
return hasGr
def render(self, url):
from urlparse import urlparse, parse_qsl
import StringIO
png = StringIO.StringIO()
from Products.ZenRRD.zenrender import RenderServer
png.write(eval('RenderServer("").render('+','.join(['%s="%s"' % k for k in parse_qsl(urlparse(url)[4])])+')'))
png.seek(0)
return png
def upload(self, strObj, saveAs):
import ftplib
con = ftplib.FTP('ftp.nm.ru', 'zenbot', 'qwe123#')
#create path if it doesn't exists and cwd to it
for dir in saveAs.split('/')[:-1]:
try:
con.cwd(dir)
except ftplib.error_perm:
con.mkd(dir)
con.cwd(dir)
con.storbinary('STOR ' + saveAs.split('/')[-1], strObj)
con.quit()
return 'http://zenbot.nm.ru/' + saveAs
def shorten(self,url):
import urllib2
html=urllib2.urlopen("http://tinyurl.com/create.php?url=%s" % url).read()
return html[html.find("<b>http://tinyurl.com/")+3:html.find("</b>",html.find("<b>http://tinyurl.com/"))]
def private(self):
return False
# parse the options
def options(self):
parser = Options(description = 'Retrieve graph. Simple example:\n graph -d 10.1.1.1 -g IO', prog = 'graph')
parser.add_option('-d', '--device', dest='deviceName', help='Device name, IP or MAC.')
parser.add_option('-g', '--graph', dest='graphName', help='Name of graph.')
parser.add_option('-l', '--list', dest='list', action='store_true', help='Only list graphs for the device and/or component.')
parser.add_option('-s', '--subcomponent', dest='subComponent', help='Optional subcomponent name, if the graph does not reside directly on the device. You will probably have to specify this.')
return parser
def help(self):
opts = self.options()
return str(opts.help())
| gpl-2.0 | 2,868,993,540,534,500,000 | 47.561151 | 347 | 0.567852 | false | 3.982301 | false | false | false |
Chromium97/lnmiithackathon | website/settings.py | 1 | 2237 | """
Django settings for website project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y8dz)a%5b0+-bgb=2(1ry1pt41rbng1x41cruigaht9c-n(yn='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'query',
#'blog',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'website.urls'
WSGI_APPLICATION = 'website.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
| mit | -621,911,359,711,887,500 | 23.855556 | 72 | 0.725078 | false | 3.284875 | false | false | false |
CornellProjects/hlthpal | web/project/main/views.py | 1 | 28275 | import os
import datetime
from django.utils.dateparse import parse_datetime
import collections, json
from django.http import HttpResponse, JsonResponse
from django.http import HttpResponseForbidden
from django.shortcuts import render
from django.template import loader
from django.contrib.auth import get_user_model
from rest_framework.authentication import TokenAuthentication
from django.utils.encoding import smart_str
from django.core.urlresolvers import reverse_lazy
from django.db.models.signals import post_save
from django.core.mail import send_mail
from django.conf import settings
from wsgiref.util import FileWrapper
from rest_framework.decorators import detail_route
from rest_framework import generics
from django.db import IntegrityError
# Custom models
from .models import Record, Answer, Entity, Question, Symptom, Notes, Patient, Log, Doctor
# Serializers import
from .serializers import (
UserCreateSerializer,
UserLoginSerializer,
UserProfileSerializer,
UserGetSerializer,
AnswerSerializer,
AnswerGetSerializer,
RecordSerializer,
# DoctorCreateSerializer,
DoctorSerializer,
EntityCreateSerializer,
QuestionGetSerializer,
SymptomSerializer,
SymptomGetSerializer,
QuestionSerializer,
NotesCreateSerializer,
NotesGetSerializer,
PatientActivateSerializer,
PatientGetSerializer,
PatientSectorSerializer,
PatientStatusGetSerializer,
PatientScoreGetSerializer,
PatientRecordGetSerializer)
# rest_framework imports
from rest_framework import status
from rest_framework import filters
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.generics import (
CreateAPIView,
ListAPIView,
UpdateAPIView,
ListCreateAPIView,
RetrieveUpdateDestroyAPIView,
get_object_or_404)
# Import permissions
from rest_framework.permissions import (
AllowAny,
IsAuthenticated,
IsAdminUser,
)
from .permissions import IsOwner
from .emails import send_user_registration_emails
User = get_user_model()
#####################################################################################
# Set up trigger for registration email
if os.environ.get('DJANGO_SEND_EMAIL'):
post_save.connect(send_user_registration_emails, sender=User)
######################################################################################
# Build paths
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DOWNLOADS_DIR = BASE_DIR + '/downloads/'
RELEASE_APK = 'app-release.apk'
######################################################################################
# Method based views
# endpoint for '/home'
def index(request):
#get the template
template = loader.get_template('index.html')
data = {'images' : settings.MEDIA_URL}
return HttpResponse(template.render(data))
# endpoint for '/dashboard'
def dashboard(request):
#get the template
template = loader.get_template('dashboard.html')
return HttpResponse(template.render())
# Method based views
# endpoint for '/home'
def download_android(request):
file_name = DOWNLOADS_DIR + RELEASE_APK;
#file_size = os.stat(file).st_size
file_size = os.path.getsize(file_name)
wrapper = FileWrapper(file(file_name))
response = HttpResponse(wrapper, content_type='application/vnd.android.package-archive')
response['Content-Disposition'] = 'attachment; filename=%s' % smart_str(RELEASE_APK)
response['Content-Length'] = file_size
return response
######################################################################################
# Class based user views
class UserCreateView(CreateAPIView):
'''API to create a new user '''
serializer_class = UserCreateSerializer
permission_classes = [AllowAny]
queryset = User.objects.all()
def post(self, request, *args, **kwargs):
serializer = UserCreateSerializer(data=request.data)
try:
serializer.is_valid(raise_exception=True)
if not request.user.is_anonymous:
Log.objects.create(user=request.user, activity='add_new_patient')
return self.create(request, *args, **kwargs)
except:
# print(serializer.errors)
# print('Errors')
if not request.user.is_anonymous:
Log.objects.create(user=request.user, activity='fail_add_new_patient') # failed sign in or sign out.
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class UserLoginView(APIView):
'''API to login and obtain an auth token'''
serializer_class = UserLoginSerializer
permission_classes = [AllowAny]
queryset = User.objects.all()
def post(self, request, *args, **kwargs):
data = request.data
serializer = UserLoginSerializer(data=data)
if serializer.is_valid(raise_exception=True):
result = serializer.data
# Only return token
if result.has_key('username'):
result.pop('username')
if result.has_key('email'):
result.pop('email')
if not request.user.is_anonymous:
Log.objects.create(user=request.user, activity='success_sign_in')
return Response(result, status=status.HTTP_200_OK)
else:
if not request.user.is_anonymous:
Log.objects.create(user=request.user, activity='failed_sign_in') # failed sign in or sign out.
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class UserLogoutView(APIView):
'''API to logout and delete an auth token for TokenAuthentication Method'''
serializer_class = UserLoginSerializer
permission_classes = [IsAuthenticated, IsAdminUser]
# queryset = User.objects.all()
def post(self, request, *args, **kwargs):
try:
request.user.auth_token.delete()
except Exception as e:
print(e)
return Response({"failure": "Not found."}, status=status.HTTP_400_BAD_REQUEST)
return Response({"success": "Successfully logged out."}, status=status.HTTP_204_OK)
class UserValidateEmail(CreateAPIView):
serializer_class = UserLoginSerializer
permission_classes = [AllowAny]
queryset = User.objects.all()
def post(self, request, *args, **kwargs):
'''Validates if user can be registered by checking email/username doesn't already exist'''
data = request.data
if User.objects.filter(username=data['email']) or User.objects.filter(email=data['email']):
return Response({'status': False}, status=status.HTTP_200_OK)
else:
return Response({'status': True}, status=status.HTTP_200_OK)
class UserProfileView(APIView):
'''API to GET user profile information.'''
serializer_class = UserProfileSerializer
permission_classes = [IsAuthenticated]
queryset = User.objects.all()
def get(self, request, format=None):
user_obj = self.request.user
query = User.objects.filter(username=user_obj)
serializer = UserProfileSerializer(user_obj)
return Response(serializer.data)
class AnswerAPIView(ListCreateAPIView):
'''API to create one or multiple Answer instances '''
queryset = Answer.objects.all()
serializer_class = AnswerSerializer
permission_classes = [IsAuthenticated]
def post(self, request, *args, **kwargs):
'''Validates if user can be registered by checking email/username doesn't already exist'''
for i in range(len(request.data)):
request.data[i]['question'] = str(Question.objects.get(question_number=request.data[i]['question']).id) # sql returns long integers so need to cast back
serializer = AnswerSerializer(data=request.data, many=True)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def get_serializer(self, *args, **kwargs):
if "data" in kwargs:
data = kwargs["data"]
if isinstance(data, list):
kwargs["many"] = True
return super(AnswerAPIView, self).get_serializer(*args, **kwargs)
class SymptomAPIView(ListCreateAPIView):
'''API to create one or multiple Symptom instances '''
queryset = Symptom.objects.all()
serializer_class = SymptomSerializer
permission_classes = [IsAuthenticated]
def get_serializer(self, *args, **kwargs):
if "data" in kwargs:
data = kwargs["data"]
if isinstance(data, list):
kwargs["many"] = True
return super(SymptomAPIView, self).get_serializer(*args, **kwargs)
class RecordAPIView(ListCreateAPIView):
'''API to GET or create a new Record '''
queryset = Record.objects.all()
serializer_class = RecordSerializer
permission_classes = [IsAuthenticated]
def create(self, request):
if 'created_date' in request.data:
request.data['created_date'] = datetime.datetime.fromtimestamp(int(request.data['created_date'])/1000).strftime('%Y-%m-%d %H:%M:%S'+'Z')
try:
record = Record.objects.get(user=request.user, created_date=request.data['created_date'])
print("One instance already initiated")
return Response({"detail": "Instance already initiated"}, status=status.HTTP_400_BAD_REQUEST)
except Record.MultipleObjectsReturned:
print("Multiple instances initiated")
return Response({"detail": "Instance already initiated"}, status=status.HTTP_400_BAD_REQUEST)
except Record.DoesNotExist:
print("Creating new record")
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
try:
serializer.save(user=self.request.user, created_date=request.data['created_date'])
except IntegrityError:
return Response({"detail": "Instance already initiated"}, status=status.HTTP_400_BAD_REQUEST)
else:
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save(user=self.request.user)
return Response(serializer.data, status=status.HTTP_201_CREATED)
def get_queryset(self):
return Record.objects.filter(user=self.request.user)
class RecordUpdateView(RetrieveUpdateDestroyAPIView):
serializer_class = RecordSerializer
queryset = Record.objects.all()
permission_classes = [IsAuthenticated]
def update(self, request, pk=None):
record = get_object_or_404(Record, pk=pk)
for param in request.data:
if param == 'score':
record.score = request.data[param]
print('updating score', record.signed)
if param == 'update_user':
record.signed = request.user if record.signed == None else None
print('updating user', record.signed)
record.save()
return Response({'detail': 'Signed user info updated'}, status=status.HTTP_200_OK)
# class RecordUpdateView(RetrieveUpdateDestroyAPIView):
# '''API to delete or edit a Record '''
# queryset = Record.objects.filter()
# serializer_class = RecordSerializer
# permission_classes = [IsAuthenticated]
# def update(self, request)
# model = Record
# success_url = reverse_lazy('id')
class QuestionUpdateView(RetrieveUpdateDestroyAPIView):
'''API to delete or edit a question '''
queryset = Question.objects.filter()
serializer_class = QuestionSerializer
permission_classes = [IsAuthenticated]
model = Question
success_url = reverse_lazy('id')
# Custom mixin for Generic views in Django Rest Framework API Guide
class MultipleFieldLookupMixin(object):
"""
Apply this mixin to any view or viewset to get multiple field filtering
based on a `lookup_fields` attribute, instead of the default single field filtering.
"""
def get_object(self):
queryset = self.get_queryset() # Get the base queryset
queryset = self.filter_queryset(queryset) # Apply any filter backends
filter = {}
for field in self.lookup_fields:
if self.kwargs[field]: # Ignore empty fields.
filter[field] = self.kwargs[field]
return get_object_or_404(queryset, **filter) # Lookup the object
class AnswerUpdateView(RetrieveUpdateDestroyAPIView, MultipleFieldLookupMixin):
''' API to delete or edit an answer based on the question associated with it '''
queryset = Answer.objects.filter()
serializer_class = AnswerSerializer
permission_classes = [IsAuthenticated]
model = Answer
success_url = reverse_lazy('record', 'question')
lookup_field = 'record'
lookup_field = 'question'
class SymptomUpdateView(RetrieveUpdateDestroyAPIView, MultipleFieldLookupMixin):
''' API to delete or edit a symptom '''
queryset = Symptom.objects.filter()
serializer_class = SymptomSerializer
permission_classes = [IsAuthenticated]
model = Symptom
success_url = reverse_lazy('record', 'symptom')
lookup_field = 'record'
lookup_field = 'symptom'
class QuestionGetAPIView(ListAPIView):
'''API to get questions in the database '''
serializer_class = QuestionGetSerializer
permission_classes = [IsAuthenticated]
queryset = Question.objects.all()
class CurrentUserView(APIView):
'''API to get current user's information '''
permission_classes = [IsAuthenticated]
def get(self, request):
serializer = UserCreateSerializer(self.request.user)
return Response(serializer.data)
######################################################################################
# Class based privileged user views
######################################################################################
class PatientHistoryView(APIView):
'''API to get patient history '''
serializer_class = PatientActivateSerializer
permission_classes = [IsAuthenticated, IsAdminUser]
queryset = User.objects.filter(is_staff=False)
def post(self, request, *args, **kwargs):
data = request.data
# Check if request contains username
username = data.get("username", None)
result = {}
if not username:
error = "username is required"
result['error'] = error
return Response(result, status=status.HTTP_400_BAD_REQUEST)
else:
pass
#print "username found", data['username']
# Check if username is valid
if User.objects.filter(username=username).exists():
user = User.objects.filter(username=username).first()
if user.is_staff:
error = "user is not a patient!"
result['error'] = error
return Response(result, status=status.HTTP_400_BAD_REQUEST)
user_serial = PatientActivateSerializer(user)
query = Record.objects.filter(user=user)
result = []
for record in query:
clean_result = {}
record_serial = RecordSerializer(record)
clean_result['record'] = record_serial.data
answers = Answer.objects.filter(record=record_serial.data['id'])
symptoms = Symptom.objects.filter(record=record_serial.data['id'])
ans_result, symp_result = [], []
for ans in answers:
ans_serial = AnswerGetSerializer(ans);
ans_result.append(ans_serial.data)
for symp in symptoms:
symp_serial = SymptomGetSerializer(symp);
symp_result.append(symp_serial.data)
clean_result['data'] = ans_result
clean_result['symp'] = symp_result
result.append(clean_result)
# print(result)
return Response(result, status=status.HTTP_200_OK)
else:
error = "username does not exist!"
result['error'] = error
return Response(result, status=status.HTTP_400_BAD_REQUEST)
class PatientActivateView(APIView):
''' API to activate a patient account '''
serializer_class = PatientActivateSerializer
permission_classes = [IsAuthenticated, IsAdminUser]
queryset = User.objects.filter(is_staff=False)
def post(self, request, *args, **kwargs):
data = request.data
# Check if request contains username
username = data.get("username", None)
result = {}
if not username:
error = "username is required"
result['error'] = error
return Response(result, status=status.HTTP_400_BAD_REQUEST)
else:
pass
#print "username found", data['username']
# Check if username is valid
if User.objects.filter(username=username).exists():
user = User.objects.filter(username=username).first()
if user.is_staff:
error = "user is not a patient"
result['error'] = error
return Response(result, status=status.HTTP_400_BAD_REQUEST)
user.is_active = True
user.save()
user_serial = PatientActivateSerializer(user)
return Response(user_serial.data, status=status.HTTP_200_OK)
else:
error = "username does not exist"
result['error'] = error
return Response(result, status=status.HTTP_400_BAD_REQUEST)
class PatientDeactivateView(APIView):
'''API to deactivate a patient account '''
serializer_class = PatientActivateSerializer
permission_classes = [IsAuthenticated, IsAdminUser]
queryset = User.objects.filter(is_staff=False)
def post(self, request, *args, **kwargs):
data = request.data
# Check if request contains username
username = data.get("username", None)
result = {}
if not username:
error = "username is required"
result['error'] = error
return Response(result, status=status.HTTP_400_BAD_REQUEST)
else:
pass
#print "username found", data['username']
# Check if username is valid
if User.objects.filter(username=username).exists():
user = User.objects.filter(username=username).first()
if user.is_staff:
error = "user is not a patient"
result['error'] = error
return Response(result, status=status.HTTP_400_BAD_REQUEST)
user.is_active = False
user.save()
user_serial = PatientActivateSerializer(user)
return Response(user_serial.data, status=status.HTTP_200_OK)
else:
error = "username does not exist"
result['error'] = error
return Response(result, status=status.HTTP_400_BAD_REQUEST)
class EntityCreateView(CreateAPIView):
'''API to create a new Entity '''
serializer_class = EntityCreateSerializer
permission_classes = [IsAuthenticated, IsAdminUser]
queryset = Entity.objects.all()
class DoctorCreateView(CreateAPIView):
'''API to create a new doctor user '''
serializer_class = DoctorSerializer
permission_classes = [IsAuthenticated, IsAdminUser]
queryset = Doctor.objects.all()
class DoctorGetView(ListAPIView):
'''API to get doctor users '''
serializer_class = DoctorSerializer
permission_classes = [IsAuthenticated, IsAdminUser]
queryset = Doctor.objects.all()
class PatientGetView(ListAPIView):
'''API to Get a list of all patients '''
serializer_class = PatientStatusGetSerializer
permission_classes = [IsAuthenticated, IsAdminUser]
queryset = User.objects.filter(is_staff=False)
class PatientDataGetView(ListAPIView):
'''API to get all patients latest data '''
permission_classes = [IsAuthenticated, IsAdminUser]
queryset = User.objects.filter(is_staff=False)
def get(self, request, format=None):
patients = User.objects.filter(is_staff=False, is_active=True, date_joined__gte=datetime.date(2018, 06, 28))
# patients = User.objects.filter(is_staff=False, is_active=True)
result = []
if not request.user.is_anonymous:
Log.objects.create(user=request.user, activity='view_dashboard')
for user in patients:
# query = Record.objects.filter(user=user).order_by('-date').first()
# Get last submission for each patient
entry = collections.OrderedDict()
user_serial = PatientGetSerializer(user)
entry['user'] = user_serial.data;
patient = Patient.objects.filter(user=user).first()
# Get sector data
if patient is not None:
sector_serial = PatientSectorSerializer(patient)
entry['location'] = sector_serial.data
# print(patient,' sector: ', sector_serial.data)
else:
entry['location'] = { 'sector': ''}
# print(patient, 'no sector')
# Get latest score
notes = Notes.objects.filter(patient=user).last()
if notes is not None:
notes_serial = NotesGetSerializer(notes)
entry['notes'] = notes_serial.data
else:
entry['notes'] = {}
query = Record.objects.filter(user=user).last()
if query is not None:
rec = RecordSerializer(query)
entry['record'] = rec.data;
query = Answer.objects.filter(record=rec.data['id'])
ans = PatientRecordGetSerializer(query, many=True);
entry['data'] = ans.data;
result.append(entry)
else:
entry['record'] = {'date':'1900-05-24T07:27:21.238535Z'}
entry['data'] = []
result.append(entry)
result = sorted(result, key=lambda x: float(parse_datetime(x['record']['date']).strftime('%s')), reverse=True)
return Response(result)
class PatientScoreGetView(ListAPIView):
'''API to get all patients latest score '''
permission_classes = [IsAuthenticated, IsAdminUser]
queryset = User.objects.filter(is_staff=False)
def get(self, request, format=None):
patients = User.objects.filter(is_staff=False, is_active=True)
result = []
for user in patients:
# query = Record.objects.filter(user=user).order_by('-date').first()
# Get last submission for each patient
entry = collections.OrderedDict()
user_serial = PatientGetSerializer(user)
entry['user'] = user_serial.data;
patient = Patient.objects.filter(user=user).first()
if patient is not None:
sector_serial = PatientSectorSerializer(patient)
entry['location'] = sector_serial.data
else:
entry['location'] = { 'sector': ''}
query = Record.objects.filter(user=user).last()
if query is not None:
rec = RecordSerializer(query)
entry['record'] = rec.data;
result.append(entry)
return Response(result)
class NotesCreateView(APIView):
'''API to add notes for a patient '''
permission_classes = [IsAuthenticated, IsAdminUser]
queryset = User.objects.filter(is_staff=False)
def post(self, request, format=None):
if not request.user.is_anonymous:
Log.objects.create(user=request.user, activity='add_patient_note')
data = request.data
result = {}
# Check who posted
auth_user = None
if request and request.user:
auth_user = request.user
#print "Auth user: ", str(auth_user)
# Check if request contains notes username
notes = data.get("notes", None)
if not notes:
error = "notes is required!"
result['error'] = error
return Response(result, status=status.HTTP_400_BAD_REQUEST)
username = data.get("username", None)
if not username:
error = "username is required!"
result['error'] = error
return Response(result, status=status.HTTP_400_BAD_REQUEST)
# Check if requested user is a patient
if User.objects.filter(username=username).exists():
user = User.objects.filter(username=username).first()
if user.is_staff:
error = "user is not a patient"
result['error'] = error
return Response(result, status=status.HTTP_400_BAD_REQUEST)
# Dosage is optional
dosage = data.get("dosage", None)
if not dosage:
saved_notes = Notes.objects.create(author=auth_user, patient=user, notes=notes)
else:
saved_notes = Notes.objects.create(author=auth_user, patient=user, notes=notes, dosage=dosage)
notes_serial = NotesCreateSerializer(saved_notes)
# print notes_serial.data
return Response(notes_serial.data, status=status.HTTP_201_CREATED)
else:
error = "username does not exist!"
result['error'] = error
return Response(result, status=status.HTTP_400_BAD_REQUEST)
return Response(result)
class NotesGetAPIView(ListAPIView):
'''API to get notes for all users'''
permission_classes = [IsAuthenticated, IsAdminUser]
queryset = Notes.objects.all()
def get(self, request, format=None):
patients = User.objects.filter(is_staff=False, is_active=True)
result = []
for patient in patients:
# query = Record.objects.filter(user=user).order_by('-date').first()
# Get last submission for each patient
entry = collections.OrderedDict()
user_serial = PatientGetSerializer(patient)
entry['patient'] = user_serial.data;
notes = Notes.objects.filter(patient=patient).last()
if notes is not None:
notes_serial = NotesGetSerializer(notes)
entry['notes'] = notes_serial.data
else:
entry['notes'] = {}
result.append(entry)
return Response(result)
class NotesHistoryGetView(APIView):
'''API to get patient history '''
permission_classes = [IsAuthenticated, IsAdminUser]
queryset = User.objects.filter(is_staff=False)
def post(self, request, *args, **kwargs):
data = request.data
if not request.user.is_anonymous:
Log.objects.create(user=request.user, activity='view_patient_details')
# Check if request contains username
username = data.get("username", None)
result = {}
if not username:
error = "username is required"
result['error'] = error
return Response(result, status=status.HTTP_400_BAD_REQUEST)
else:
pass
#print "username found", data['username']
# Check if username is valid
if User.objects.filter(username=username).exists():
user = User.objects.filter(username=username).first()
if user.is_staff:
error = "user is not a patient!"
result['error'] = error
return Response(result, status=status.HTTP_400_BAD_REQUEST)
user_serial = UserGetSerializer(user)
query = Notes.objects.filter(patient=user)
result = []
for record in query:
notes_serial = NotesGetSerializer(record)
result.append(notes_serial.data)
return Response(result, status=status.HTTP_200_OK)
else:
error = "username does not exist!"
result['error'] = error
return Response(result, status=status.HTTP_400_BAD_REQUEST) | apache-2.0 | 1,060,832,617,351,029,600 | 36.651132 | 164 | 0.62359 | false | 4.369495 | false | false | false |
bubakazouba/Robinhood-for-Google-Finance | google_finance_export.py | 1 | 1431 | from GetRobinhoodTrades import getRobinhoodTrades
import argparse
import Exporter
exporter = Exporter.Exporter("trades")
parser = argparse.ArgumentParser(description='Export Robinhood trades to a CSV file')
parser.add_argument('--debug', action='store_true', help='store raw JSON output to debug.json')
parser.add_argument('--username', required=True, help='your Robinhood username')
parser.add_argument('--password', required=True, help='your Robinhood password')
exporter.addArgumentsToParser(parser)
args = parser.parse_args()
username = args.username
password = args.password
exporter.parseArguments(args)
trades = getRobinhoodTrades(username, password, args.debug)
# CSV headers
# filter out keys we dont need, also change names of keys
desired_keys_mappings = {
"price": "Purchase price per share",
"timestamp": "Date purchased",
"fees": "Commission",
"quantity": "Shares",
"symbol": "Symbol",
"side": "Transaction type"
}
desired_keys = sorted(desired_keys_mappings.keys())
keys = [desired_keys_mappings[key] for key in sorted(trades[0].keys()) if key in desired_keys]
csv = ""
csv += ",".join(keys)
csv += "\n"
# CSV rows
csvb = []
for trade in trades:
line = ','.join([str(trade[key]) for key in desired_keys])
csvb.append(line)
#google finance seems to prefer dates in ascending order, so we must reverse the given order
csv += '\n'.join(reversed(csvb))
exporter.exportText(csv) | mit | -5,983,913,685,366,143,000 | 28.833333 | 95 | 0.725367 | false | 3.439904 | false | false | false |
lanyeit/lanye-self | pandarss/utils.py | 2 | 2767 | #!/usr/bin/env python
#coding=utf-8
import decimal
import time
import random
import datetime
import calendar
random_generator = random.SystemRandom()
class Utils:
""" 工具模块类
"""
_base_id = 0
_CurrentID = random_generator.randrange(1, 1024)
@staticmethod
def fen2yuan(fen=0):
f = decimal.Decimal(fen or 0)
y = f / decimal.Decimal(100)
return str(y.quantize(decimal.Decimal('1.00')))
@staticmethod
def yuan2fen(yuan=0):
y = decimal.Decimal(yuan or 0)
f = y * decimal.Decimal(100)
return int(f.to_integral_value())
@staticmethod
def kb2mb(ik):
_kb = decimal.Decimal(ik or 0)
_mb = _kb / decimal.Decimal(1024)
return str(_mb.quantize(decimal.Decimal('1.00')))
@staticmethod
def sec2hour(sec=0):
_sec = decimal.Decimal(sec or 0)
_hor = _sec / decimal.Decimal(3600)
return str(_hor.quantize(decimal.Decimal('1.00')))
@staticmethod
def bps2mbps(bps):
_bps = decimal.Decimal(bps or 0)
_mbps = _bps / decimal.Decimal(1024*1024)
return str(_mbps.quantize(decimal.Decimal('1.00')))
@staticmethod
def gen_order_id():
if Utils._base_id >= 9999:Utils._base_id=0
Utils._base_id += 1
_num = str(Utils._base_id).zfill(4)
return datetime.datetime.now().strftime("%Y%m%d%H%M%S") + _num
@staticmethod
def add_months(dt,months, days=0):
month = dt.month - 1 + months
year = dt.year + month / 12
month = month % 12 + 1
day = min(dt.day,calendar.monthrange(year,month)[1])
dt = dt.replace(year=year, month=month, day=day)
return dt + datetime.timedelta(days=days)
class MemCache:
''' 内存缓存
'''
def __init__(self):
self.cache = {}
def set(self, key, obj, expire=0):
if obj in ("", None) or key in ("", None):
return None
objdict = dict(obj=obj,expire=expire,time=time.time())
self.cache[key] = objdict
def get(self, key):
if key in self.cache:
objdict = self.cache[key]
_time = time.time()
if objdict['expire'] == 0 or (_time - objdict['time']) < objdict['expire']:
return objdict['obj']
else:
del self.cache[key]
return None
else:
return None
def aget(self, key, fetchfunc, *args, **kwargs):
if key in self.cache:
return self.get(key)
elif fetchfunc:
expire = kwargs.pop('expire',600)
result = fetchfunc(*args,**kwargs)
if result:
self.set(key,result,expire=expire)
return result
memcache = MemCache()
| bsd-3-clause | -2,332,832,476,347,254,000 | 26.767677 | 87 | 0.559112 | false | 3.47096 | false | false | false |
wy315700/dota2_match | dota2/Match.py | 1 | 3864 | from dota2Error import Dota2APIError
import time
import datetime
# import dota2
from MysqlHelper import *
from APIConnection import *
from multiprocessing import Process, Queue, Lock, freeze_support
import time
import random
start_seq_num = 0
api = APIConnection()
process_num = 5
base_num = 1000000
base_start_num = 600000000
base_end_num = 650000000
max_process_num = (base_end_num - base_start_num) / base_num
def fetch_history_by_seq_num(data_queue, start_seq_num, callback_queue):
max_seq_num = start_seq_num + base_num
while True:
matchs = api._getMatchBySeqNum(start_seq_num)
for x in matchs:
while data_queue.full():
print "queue is full"
time.sleep(random.random())
data_queue.put(x)
start_seq_num = x['match_seq_num']
start_seq_num += 1
if start_seq_num >= max_seq_num:
callback_queue.put(1)
break
def saveToDB(queue):
while True:
x = queue.get()
handle = MatchHandle()
match = DotaMatchModel();
match.match_id = x['match_id']
match.match_seq_num = x['match_seq_num']
try:
match.player0 = str(x['players'][0])
match.player1 = str(x['players'][1])
match.player2 = str(x['players'][2])
match.player3 = str(x['players'][3])
match.player4 = str(x['players'][4])
match.player5 = str(x['players'][5])
match.player6 = str(x['players'][6])
match.player7 = str(x['players'][7])
match.player8 = str(x['players'][8])
match.player9 = str(x['players'][9])
except Exception, e:
pass
match.radiant_win = 0 if x['radiant_win'] == False else 1
match.duration = x['duration']
match.start_time = datetime.datetime.fromtimestamp(
int(x['start_time'])
).strftime('%Y-%m-%d %H:%M:%S')
match.first_blood_time = x['first_blood_time']
match.tower_status_radiant = x['tower_status_radiant']
match.tower_status_dire = x['tower_status_dire']
match.barracks_status_radiant = x['barracks_status_radiant']
match.barracks_status_dire = x['barracks_status_dire']
match.cluster = x['cluster']
match.lobby_type = x['lobby_type']
match.human_players = x['human_players']
match.leagueid = x['leagueid']
match.positive_votes = x['positive_votes']
match.negative_votes = x['negative_votes']
match.game_mode = x['game_mode']
handle.saveMatchToDB(match)
start_seq_num = start_seq_num + 1
if __name__ == '__main__':
freeze_support()
process_list = []
q = Queue()
callback_queue = Queue()
for i in xrange(0,process_num):
process_list.append(Process(target=fetch_history_by_seq_num,args=(q, base_start_num + base_num * i, callback_queue,)))
current_num = process_num
process_list.append(Process(target=saveToDB,args=(q,)))
for x in process_list:
x.start()
result = callback_queue.get()
while result == 1:
print current_num
if current_num >= max_process_num:
break
Process(target=fetch_history_by_seq_num,args=(q, base_start_num + base_num * current_num, callback_queue,)).start()
current_num += 1
result = callback_queue.get()
for x in process_list:
x.join()
| gpl-2.0 | -7,039,144,919,082,925,000 | 33.19469 | 126 | 0.524586 | false | 3.711816 | false | false | false |
michal-ruzicka/archivematica | src/MCPServer/lib/unitFile.py | 1 | 2379 | #!/usr/bin/python -OO
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage MCPServer
# @author Joseph Perry <[email protected]>
import logging
import sys
sys.path.append("/usr/lib/archivematica/archivematicaCommon")
from dicts import ReplacementDict
LOGGER = logging.getLogger('archivematica.mcp.server')
class unitFile(object):
"""For objects representing a File"""
def __init__(self, currentPath, UUID="None", owningUnit=None):
self.currentPath = currentPath
self.UUID = UUID
self.owningUnit = owningUnit
self.fileGrpUse = 'None'
self.fileList = {currentPath: self}
self.pathString = ""
if owningUnit:
self.pathString = owningUnit.pathString
def __str__(self):
return 'unitFile: <UUID: {u.UUID}, path: {u.currentPath}>'.format(u=self)
def getReplacementDic(self, target=None):
if target is not None and self.owningUnit:
return self.owningUnit.getReplacementDic(self.owningUnit.currentPath)
elif self.UUID != "None":
return ReplacementDict.frommodel(
type_='file',
file_=self.UUID
)
# If no UUID has been assigned yet, we can't use the
# ReplacementDict.frommodel constructor; fall back to the
# old style of manual construction.
else:
return ReplacementDict({
"%relativeLocation%": self.currentPath,
"%fileUUID%": self.UUID,
"%fileGrpUse%": self.fileGrpUse
})
def reload(self):
pass
def reloadFileList(self):
pass
| agpl-3.0 | -8,173,164,090,468,189,000 | 33.478261 | 81 | 0.666246 | false | 3.843296 | false | false | false |
joopert/home-assistant | homeassistant/components/hlk_sw16/switch.py | 12 | 1410 | """Support for HLK-SW16 switches."""
import logging
from homeassistant.components.switch import ToggleEntity
from homeassistant.const import CONF_NAME
from . import DATA_DEVICE_REGISTER, SW16Device
_LOGGER = logging.getLogger(__name__)
def devices_from_config(hass, domain_config):
"""Parse configuration and add HLK-SW16 switch devices."""
switches = domain_config[0]
device_id = domain_config[1]
device_client = hass.data[DATA_DEVICE_REGISTER][device_id]
devices = []
for device_port, device_config in switches.items():
device_name = device_config.get(CONF_NAME, device_port)
device = SW16Switch(device_name, device_port, device_id, device_client)
devices.append(device)
return devices
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the HLK-SW16 platform."""
async_add_entities(devices_from_config(hass, discovery_info))
class SW16Switch(SW16Device, ToggleEntity):
"""Representation of a HLK-SW16 switch."""
@property
def is_on(self):
"""Return true if device is on."""
return self._is_on
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
await self._client.turn_on(self._device_port)
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
await self._client.turn_off(self._device_port)
| apache-2.0 | -2,503,243,105,140,089,300 | 31.045455 | 86 | 0.680851 | false | 3.643411 | true | false | false |
rogerlew/py-st-a-ggre-lite3 | setup.py | 2 | 1523 | # Copyright (c) 2011, Roger Lew [see LICENSE.txt]
# This software is funded in part by NIH Grant P20 RR016454.
##from distutils.core import setup
from setuptools import setup
setup(name='pystaggrelite3',
version='0.1.3',
description='Pure Python sqlite3 statistics aggregate functions',
author='Roger Lew',
author_email='[email protected]',
license = "BSD",
classifiers=["Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.0",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Topic :: Database",
"Topic :: Database :: Database Engines/Servers",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Software Development :: Libraries :: Python Modules"],
url='http://code.google.com/p/py-st-a-ggre-lite3/',
py_modules=['pystaggrelite3',],
)
"""setup.py sdist upload --identity="Roger Lew" --sign"""
| bsd-3-clause | -6,954,211,580,426,667,000 | 44.151515 | 81 | 0.562049 | false | 4.389049 | false | true | false |
mitsei/dlkit | dlkit/primordium/mapping/spatial_units.py | 1 | 4657 | """
Implementions of osid.mapping.SpatialUnit.
Can be used by implementations and consumer applications alike.
"""
from dlkit.abstract_osid.mapping import primitives as abc_mapping_primitives
from dlkit.abstract_osid.osid.errors import NullArgument, InvalidArgument
from ..osid.primitives import OsidPrimitive
from ..type.primitives import Type
from dlkit.primordium.mapping.coordinate_primitives import BasicCoordinate
from decimal import Decimal
class SpatialUnitFactory(object):
"""returns the right SpatialUnit depending on the record type in the spatial_unit_map
Assumes only one record type for now!
"""
def get_spatial_unit(self, spatial_unit_map):
record_type = Type(idstr=spatial_unit_map['recordTypes'][0])
if (record_type.get_authority() != 'ODL.MIT.EDU' or
record_type.get_identifier_namespace() != 'osid.mapping.SpatialUnit'):
raise InvalidArgument()
if record_type.get_identifier() == 'rectangle':
return RectangularSpatialUnit(spatial_unit_map=spatial_unit_map)
raise InvalidArgument()
class RectangularSpatialUnit(abc_mapping_primitives.SpatialUnit, OsidPrimitive):
"""
A spatial unit represents a region in space.
In this case a rectangle in a 2 dimensional coordinate space.
"""
def __init__(self, coordinate=None, width=None, height=None, spatial_unit_map=None):
if spatial_unit_map is None and coordinate is None and width is None and height is None:
raise NullArgument('must provide a coordinate or a spatial_unit_map')
if spatial_unit_map is not None:
self._coordinate = BasicCoordinate(spatial_unit_map['coordinateValues'])
self._width = spatial_unit_map['width']
self._height = spatial_unit_map['height']
else:
if not isinstance(coordinate, abc_mapping_primitives.Coordinate):
raise InvalidArgument('coordinate must be a Coordinate')
if height is None:
raise NullArgument('height must be provided with a coordinate')
if width is None:
raise NullArgument('width must be provided with a coordinate')
if not (isinstance(height, int) or isinstance(height, float)):
raise InvalidArgument('height must be an int or float')
if not (isinstance(width, int) or isinstance(width, float)):
raise InvalidArgument('width must be an int or float')
if width <= 0 or height <= 0:
raise InvalidArgument('width and height must be positive values')
self._coordinate = coordinate
self._width = width
self._height = height
def get_center_coordinate(self):
x, y = self._coordinate.get_values()
return BasicCoordinate([
float(Decimal(x) + Decimal(self._width) / 2),
float(Decimal(y) + Decimal(self._height) / 2)
])
center_coordinate = property(fget=get_center_coordinate)
def get_bounding_coordinates(self):
x, y = self._coordinate.get_values()
return [
self._coordinate,
BasicCoordinate([x + self._width, y]),
BasicCoordinate([x + self._width, y + self._height]),
BasicCoordinate([x, y + self._height])
]
bounding_coordinates = property(fget=get_bounding_coordinates)
def get_spatial_unit_record(self):
pass # This should return a record type for
spatial_unit_record = property(fget=get_spatial_unit_record)
def __contains__(self, coordinate):
if not isinstance(coordinate, abc_mapping_primitives.Coordinate):
raise TypeError('osid.mapping.SpatialUnit requires osid.mapping.Coordinate as left operand')
x, y = self._coordinate.get_values()
return bool(coordinate >= self._coordinate and
coordinate <= BasicCoordinate([x + self._width, y + self._height]))
def get_record_types(self):
return [
Type(authority='ODL.MIT.EDU',
namespace='osid.mapping.SpatialUnit',
identifier='rectangle')]
def is_of_record_type(self, record_type):
return bool(record_type in self.get_record_types())
def get_spatial_unit_map(self):
record_types = []
for rtype in self.get_record_types():
record_types.append(str(rtype))
return {
'type': 'SpatialUnit',
'recordTypes': record_types,
'coordinateValues': self._coordinate.get_values(),
'width': self._width,
'height': self._height
}
| mit | 4,065,296,784,621,175,000 | 39.146552 | 104 | 0.638394 | false | 4.308048 | false | false | false |
tmcrosario/odoo-tmc | tmc/models/dependence.py | 1 | 1112 | from odoo import _, api, fields, models
class Dependence(models.Model):
_name = 'tmc.dependence'
_description = 'Dependence'
name = fields.Char()
abbreviation = fields.Char(size=6)
document_type_ids = fields.Many2many(comodel_name='tmc.document_type')
document_topic_ids = fields.Many2many(comodel_name='tmc.document_topic')
system_ids = fields.Many2many(comodel_name='tmc.system')
in_actual_nomenclator = fields.Boolean()
@api.model
def name_search(self, name, args=None, operator='ilike', limit=100):
if not args:
args = []
if self._context.get('search_default_filter_actual_nomenclator'):
args.extend([('in_actual_nomenclator', '=', True)])
return super(Dependence, self).name_search(name=name,
args=args,
operator=operator,
limit=limit)
_sql_constraints = [('name_unique', 'UNIQUE(name)',
_('Dependence name must be unique'))]
| agpl-3.0 | 2,107,047,882,029,457,700 | 32.69697 | 76 | 0.554856 | false | 4.058394 | false | false | false |
mshubian/BAK_open-hackathon | open-hackathon-adminUI/src/app/database/models.py | 1 | 3577 | import sys
import email
sys.path.append("..")
from . import UserMixin
from . import db
from datetime import datetime
import json
def to_json(inst, cls):
# add your coversions for things like datetime's
# and what-not that aren't serializable.
convert = dict()
convert[db.DateTime] = str
d = dict()
for c in cls.__table__.columns:
v = getattr(inst, c.name)
if c.type.__class__ in convert.keys() and v is not None:
try:
func = convert[c.type.__class__]
d[c.name] = func(v)
except:
d[c.name] = "Error: Failed to covert using ", str(convert[c.type.__class__])
elif v is None:
d[c.name] = str()
else:
d[c.name] = v
return json.dumps(d)
class AdminUser(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
nickname = db.Column(db.String(50))
openid = db.Column(db.String(100))
avatar_url = db.Column(db.String(200))
access_token = db.Column(db.String(100))
online = db.Column(db.Integer) # 0:offline 1:online
create_time = db.Column(db.DateTime)
last_login_time = db.Column(db.DateTime)
def get_admin_id(self):
return self.id
def json(self):
return to_json(self, self.__class__)
def __init__(self, **kwargs):
super(AdminUser, self).__init__(**kwargs)
if self.create_time is None:
self.create_time = datetime.utcnow()
if self.last_login_time is None:
self.last_login_time = datetime.utcnow()
# if self.slug is None:
# self.slug = str(uuid.uuid1())[0:8] # todo generate a real slug
class AdminEmail(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80))
email = db.Column(db.String(120))
primary_email = db.Column(db.Integer) # 0:NOT Primary Email 1:Primary Email
verified = db.Column(db.Integer) # 0 for not verified, 1 for verified
admin_id = db.Column(db.Integer, db.ForeignKey('admin_user.id', ondelete='CASCADE'))
admin = db.relationship('AdminUser', backref=db.backref('emails', lazy='dynamic'))
def __init__(self, **kwargs):
super(AdminEmail, self).__init__(**kwargs)
class AdminToken(db.Model):
id = db.Column(db.Integer, primary_key=True)
token = db.Column(db.String(50), unique=True, nullable=False)
admin_id = db.Column(db.Integer, db.ForeignKey('admin_user.id', ondelete='CASCADE'))
admin = db.relationship('AdminUser', backref=db.backref('tokens', lazy='dynamic'))
issue_date = db.Column(db.DateTime)
expire_date = db.Column(db.DateTime, nullable=False)
def json(self):
return to_json(self, self.__class__)
def __init__(self, **kwargs):
super(AdminToken, self).__init__(**kwargs)
if self.issue_date is None:
issue_date = datetime.utcnow()
def __repr__(self):
return "AdminToken: " + self.json()
class AdminUserHackathonRel(db.Model):
id = db.Column(db.Integer, primary_key=True)
admin_email = db.Column(db.String(120))
role_type = db.Column(db.Integer)
hackathon_id = db.Column(db.Integer)
state = db.Column(db.Integer)
remarks = db.Column(db.String(255))
create_time = db.Column(db.DateTime)
def json(self):
return to_json(self, self.__class__)
def __init__(self, **kwargs):
super(AdminUserHackathonRel, self).__init__(**kwargs)
def __repr__(self):
return "AdminUserGroup: " + self.json() | apache-2.0 | 3,798,570,820,496,018,400 | 29.844828 | 93 | 0.612524 | false | 3.396961 | false | false | false |
waneric/PyMapLib | src/gabbs/controls/MapToolAction.py | 1 | 6234 | # -*- coding: utf-8 -*-
"""
MapToolAction.py - map tool for user events
======================================================================
AUTHOR: Wei Wan, Purdue University
EMAIL: [email protected]
Copyright (c) 2016 Purdue University
See the file "license.terms" for information on usage and
redistribution of this file, and for a DISCLAIMER OF ALL WARRANTIES.
======================================================================
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
from gabbs.MapUtils import iface, debug_trace
class MapToolFeatureAction(QgsMapTool):
'''
Base class for the map select tools
'''
def __init__(self, canvas):
QgsMapTool.__init__(self, canvas)
self.canvas = canvas
self.rubberBand = None
self.cursor = QCursor(Qt.ArrowCursor)
# Override events
def canvasReleaseEvent(self, event):
layer = self.canvas.currentLayer()
if not layer or layer.type() != QgsMapLayer.VectorLayer:
#emit messageEmitted( tr( "To run an action, you must choose an active vector layer." ), QgsMessageBar.INFO );
return
if layer not in self.canvas.layers():
#do not run actions on hidden layers
return
#QgsVectorLayer *vlayer = qobject_cast<QgsVectorLayer *>( layer );
#if (layer.actions().size() == 0 and \
# len(QgsMapLayerActionRegistry.instance().mapLayerActions(layer)) == 0):
#emit messageEmitted( tr( "The active vector layer has no defined actions" ), QgsMessageBar::INFO );
return
if(not self.doAction(layer, event.x(), event.y())):
#QgisApp.instance().statusBar().showMessage(tr("No features at this position found."))
pass
"""
def activate(self):
QgsMapTool.activate()
def deactivate(self):
QgsMapTool.deactivate()
"""
def doAction(self, layer, x, y):
if (not layer):
return False
point = self.canvas.getCoordinateTransform().toMapCoordinates(x, y)
featList = []
#toLayerCoordinates will throw an exception for an 'invalid' point.
#For example, if you project a world map onto a globe using EPSG 2163
#and then click somewhere off the globe, an exception will be thrown.
try:
#create the search rectangle
searchRadius = self.searchRadiusMU(self.canvas)
r = QgsRectangle()
r.setXMinimum(point.x() - searchRadius)
r.setXMaximum(point.x() + searchRadius)
r.setYMinimum(point.y() - searchRadius)
r.setYMaximum(point.y() + searchRadius)
r = self.toLayerCoordinates(layer, r)
fit = layer.getFeatures(QgsFeatureRequest().setFilterRect(r).setFlags(QgsFeatureRequest.ExactIntersect))
f = QgsFeature()
while(fit.nextFeature(f)):
featList.append(QgsFeature(f))
except QgsCsException as cse:
#Q_UNUSED(cse)
#catch exception for 'invalid' point and proceed with no features found
QgsDebugMsg(QString( "Caught CRS exception %1" ).arg(cse.what()))
if len(featList) == 0:
return False
for feat in featList:
if (layer.actions().defaultAction() >= 0):
#define custom substitutions: layer id and clicked coords
substitutionMap = {} #QMap
substitutionMap["$layerid"] = layer.id()
point = self.toLayerCoordinates(layer, point)
substitutionMap["$clickx"] = point.x()
substitutionMap["$clicky"] = point.y()
actionIdx = layer.actions().defaultAction()
#layer.actions().doAction(actionIdx, feat, substitutionMap)
self.doAttributeAction(layer, actionIdx, feat, substitutionMap)
else:
mapLayerAction = QgsMapLayerActionRegistry.instance().defaultActionForLayer(layer)
if(mapLayerAction):
mapLayerAction.triggerForFeature(layer, feat)
return True
""" Reimplement method in QGIS C++ code
"""
def doAttributeAction(self, layer, index, feat, substitutionMap):
actions = layer.actions()
if (index < 0 or index >= actions.size()):
return
action = actions.at(index)
if (not action.runable()):
return
# search for expressions while expanding actions
# no used for python binding
#context = self.createExpressionContext(layer)
#context.setFeature(feat)
#expandedAction = QString(QgsExpression.replaceExpressionText(action.action(), context, substitutionMap))
expandedAction = QString(QgsExpression.replaceExpressionText(action.action(), feat, layer, substitutionMap))
if (expandedAction.isEmpty()):
return
newAction = QgsAction(action.type(), action.name(), expandedAction, action.capture())
self.runAttributeAction(newAction)
def runAttributeAction(self, action):
if (action.type() == QgsAction.OpenUrl):
finfo = QFileInfo(action.action())
if (finfo.exists() and finfo.isFile()):
QDesktopServices.openUrl(QUrl.fromLocalFile(action.action()))
else:
QDesktopServices.openUrl(QUrl(action.action(), QUrl.TolerantMode))
elif (action.type() == QgsAction.GenericPython):
# TODO: capture output from QgsPythonRunner (like QgsRunProcess does)
QgsPythonRunner.run(action.action(), QString("Python running error"))
else:
#The QgsRunProcess instance created by this static function
#deletes itself when no longer needed.
QgsRunProcess.create(action.action(), action.capture())
"""
def createExpressionContext(self, layer):
context = QgsExpressionContext()
context.append(QgsExpressionContextUtils.globalScope())
context.append(QgsExpressionContextUtils.projectScope())
if (layer):
context.append(QgsExpressionContextUtils.layerScope(layer))
return context
""" | mit | 1,932,736,580,259,168,300 | 36.787879 | 118 | 0.612608 | false | 4.338205 | false | false | false |
pfnet/chainer | tests/chainer_tests/links_tests/caffe_tests/test_caffe_function.py | 4 | 35121 | import os
import tempfile
import unittest
import warnings
import mock
import numpy
import six
import chainer
from chainer import links
from chainer import testing
# The caffe submodule relies on protobuf which under protobuf==3.7.0 and
# Python 3.7 raises a DeprecationWarning from the collections module.
with warnings.catch_warnings():
warnings.filterwarnings(action='ignore', category=DeprecationWarning)
from chainer.links import caffe
from chainer.links.caffe.caffe_function import caffe_pb
def _iter_init(param, data):
if isinstance(data, list):
for d in data:
if hasattr(param, 'add'):
param.add()
if isinstance(d, (list, dict)):
_iter_init(param[-1], d)
else:
param[-1] = d
else:
param.append(d)
elif isinstance(data, dict):
for k, d in data.items():
if isinstance(d, (list, dict)):
_iter_init(getattr(param, k), d)
else:
setattr(param, k, d)
else:
setattr(param, data)
def _make_param(data):
param = caffe_pb.NetParameter()
_iter_init(param, data)
return param
class TestCaffeFunctionBase(unittest.TestCase):
def setUp(self):
param = _make_param(self.data)
# The name can be used to open the file a second time,
# while the named temporary file is still open on the Windows.
with tempfile.NamedTemporaryFile(delete=False) as f:
self.temp_file_path = f.name
f.write(param.SerializeToString())
def tearDown(self):
os.remove(self.temp_file_path)
def init_func(self):
self.func = caffe.CaffeFunction(self.temp_file_path)
class TestCaffeFunctionBaseMock(TestCaffeFunctionBase):
def setUp(self):
outs = []
for shape in self.out_shapes:
out_data = numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
outs.append(chainer.Variable(out_data))
self.outputs = tuple(outs)
ret_value = outs[0] if len(outs) == 1 else tuple(outs)
m = mock.MagicMock(name=self.func_name, return_value=ret_value)
self.patch = mock.patch(self.func_name, m)
self.mock = self.patch.start()
super(TestCaffeFunctionBaseMock, self).setUp()
def tearDown(self):
super(TestCaffeFunctionBaseMock, self).tearDown()
self.patch.stop()
def call(self, inputs, outputs):
invars = []
for shape in self.in_shapes:
data = numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
invars.append(chainer.Variable(data))
self.inputs = invars
with chainer.using_config('train', False):
out = self.func(inputs=dict(zip(inputs, invars)),
outputs=outputs)
self.assertEqual(len(out), len(self.outputs))
for actual, expect in zip(out, self.outputs):
self.assertIs(actual, expect)
class TestConcat(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.concat'
in_shapes = [(3, 2, 3), (3, 2, 3)]
out_shapes = [(3, 2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Concat',
'bottom': ['x', 'y'],
'top': ['z'],
'concat_param': {
'axis': 2
}
}
]
}
def test_concat(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x', 'y'], ['z'])
self.mock.assert_called_once_with(
(self.inputs[0], self.inputs[1]), axis=2)
class TestConvolution(TestCaffeFunctionBaseMock):
func_name = 'chainer.links.Convolution2D.forward'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Convolution',
'bottom': ['x'],
'top': ['y'],
'convolution_param': {
'kernel_size': [2],
'stride': [3],
'pad': [4],
'group': 3,
'bias_term': True,
},
'blobs': [
{
'num': 6,
'channels': 4,
'data': list(range(96))
},
{
'data': list(range(6))
}
]
}
]
}
def test_convolution(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
f = self.func.l1
self.assertIsInstance(f, links.Convolution2D)
for i in range(3): # 3 == group
in_slice = slice(i * 4, (i + 1) * 4) # 4 == channels
out_slice = slice(i * 2, (i + 1) * 2) # 2 == num / group
w = f.W.data[out_slice, in_slice]
numpy.testing.assert_array_equal(
w.flatten(), range(i * 32, (i + 1) * 32))
numpy.testing.assert_array_equal(
f.b.data, range(6))
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestDeconvolution(TestCaffeFunctionBaseMock):
func_name = 'chainer.links.Deconvolution2D.__call__'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Deconvolution',
'bottom': ['x'],
'top': ['y'],
'convolution_param': {
'kernel_size': [2],
'stride': [3],
'pad': [4],
'group': 3,
'bias_term': True,
},
'blobs': [
{
'num': 6,
'channels': 4,
'data': list(range(96))
},
{
'data': list(range(12))
}
]
}
]
}
def test_deconvolution(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
f = self.func.l1
self.assertIsInstance(f, links.Deconvolution2D)
for i in range(3): # 3 == group
in_slice = slice(i * 4, (i + 1) * 4) # 4 == channels
out_slice = slice(i * 2, (i + 1) * 2) # 2 == num / group
w = f.W.data[out_slice, in_slice]
numpy.testing.assert_array_equal(
w.flatten(), range(i * 32, (i + 1) * 32))
numpy.testing.assert_array_equal(
f.b.data, range(12))
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestData(TestCaffeFunctionBase):
data = {
'layer': [
{
'name': 'l1',
'type': 'Data',
}
]
}
def test_data(self):
self.init_func()
self.assertEqual(len(self.func.layers), 0)
class TestDropout(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.dropout'
in_shapes = [(3, 2, 3)]
out_shapes = [(3, 2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Dropout',
'bottom': ['x'],
'top': ['y'],
'dropout_param': {
'dropout_ratio': 0.25
}
}
]
}
def test_dropout(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(
self.inputs[0], ratio=0.25)
class TestInnerProduct(TestCaffeFunctionBaseMock):
func_name = 'chainer.links.Linear.forward'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'InnerProduct',
'bottom': ['x'],
'top': ['y'],
'inner_product_param': {
'bias_term': True,
'axis': 1
},
'blobs': [
# weight
{
'shape': {
'dim': [2, 3]
},
'data': list(range(6)),
},
# bias
{
'shape': {
'dim': [2]
},
'data': list(range(2)),
}
]
}
]
}
def test_linear(self):
self.init_func()
f = self.func.l1
self.assertIsInstance(f, links.Linear)
numpy.testing.assert_array_equal(
f.W.data, numpy.array([[0, 1, 2], [3, 4, 5]], dtype=numpy.float32))
numpy.testing.assert_array_equal(
f.b.data, numpy.array([0, 1], dtype=numpy.float32))
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestInnerProductDim4(TestCaffeFunctionBaseMock):
func_name = 'chainer.links.Linear.forward'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'InnerProduct',
'bottom': ['x'],
'top': ['y'],
'inner_product_param': {
'bias_term': False,
'axis': 1
},
'blobs': [
# weight
{
'shape': {
'dim': [4, 5, 2, 3]
},
# when `ndim` == 4, `data` stored shape[2] x shape[3]
# data
'data': list(range(6)),
}
]
}
]
}
def test_linear(self):
self.init_func()
f = self.func.l1
self.assertIsInstance(f, links.Linear)
numpy.testing.assert_array_equal(
f.W.data, numpy.array([[0, 1, 2], [3, 4, 5]], dtype=numpy.float32))
self.assertIsNone(f.b)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestInnerProductInvalidDim(TestCaffeFunctionBase):
data = {
'layer': [
{
'name': 'l1',
'type': 'InnerProduct',
'blobs': [
{
'shape': {
'dim': [2, 3, 4, 5, 6] # 5-dim is not supported
},
},
]
}
]
}
def test_linear(self):
with self.assertRaises(RuntimeError):
self.init_func()
class TestInnerProductNonDefaultAxis(TestCaffeFunctionBase):
data = {
'layer': [
{
'name': 'l1',
'type': 'InnerProduct',
'inner_product_param': {
'axis': 0 # non-default axis
}
}
]
}
def test_linear(self):
with self.assertRaises(RuntimeError):
self.init_func()
class TestLRN(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.local_response_normalization'
in_shapes = [(3, 2, 3)]
out_shapes = [(3, 2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'LRN',
'bottom': ['x'],
'top': ['y'],
'lrn_param': {
'local_size': 4,
'alpha': 0.5,
'beta': 0.25,
'norm_region': 0, # ACROSS_CHANNELS
'k': 0.5
},
}
]
}
def test_lrn(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(
self.inputs[0], n=4, k=0.5, alpha=0.5 / 4, beta=0.25)
class TestLRNWithinChannel(TestCaffeFunctionBase):
data = {
'layer': [
{
'name': 'l1',
'type': 'LRN',
'lrn_param': {
'norm_region': 1, # WITHIN_CHANNELS is not supported
},
}
]
}
def test_lrn(self):
with self.assertRaises(RuntimeError):
self.init_func()
class TestMaxPooling(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.max_pooling_2d'
in_shapes = [(3, 2, 3)]
out_shapes = [(3, 2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Pooling',
'bottom': ['x'],
'top': ['y'],
'pooling_param': {
'pool': 0, # MAX
'kernel_h': 2,
'kernel_w': 3,
'stride_h': 4,
'stride_w': 5,
'pad_h': 6,
'pad_w': 7,
}
}
]
}
def test_max_pooling(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(
self.inputs[0], (2, 3), stride=(4, 5), pad=(6, 7))
class TestAveragePooling(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.average_pooling_2d'
in_shapes = [(3, 2, 3)]
out_shapes = [(3, 2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Pooling',
'bottom': ['x'],
'top': ['y'],
'pooling_param': {
'pool': 1, # AVE
'kernel_size': 2,
'stride': 4,
'pad': 6,
}
}
]
}
def test_max_pooling(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(
self.inputs[0], 2, stride=4, pad=6)
class TestGlobalPooling(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.max_pooling_2d'
in_shapes = [(3, 2, 3, 4)]
out_shapes = [(3, 2, 3, 4)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Pooling',
'bottom': ['x'],
'top': ['y'],
'pooling_param': {
'pool': 0, # MAX
'global_pooling': True,
}
}
]
}
def test_global_pooling(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(
self.inputs[0], (3, 4), stride=1, pad=0)
class TestStochasticPooling(TestCaffeFunctionBase):
data = {
'layer': [
{
'name': 'l1',
'type': 'Pooling',
'pooling_param': {
'pool': 2, # STOCHASTIC is not supported
}
}
]
}
def test_stochastic_pooling(self):
with self.assertRaises(RuntimeError):
self.init_func()
class TestReLU(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.relu'
in_shapes = [(3, 2, 3)]
out_shapes = [(3, 2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'ReLU',
'bottom': ['x'],
'top': ['y'],
'relu_param': {
'negative_slope': 0
}
}
]
}
def test_lrn(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestLeakyReLU(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.leaky_relu'
in_shapes = [(3, 2, 3)]
out_shapes = [(3, 2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'ReLU',
'bottom': ['x'],
'top': ['y'],
'relu_param': {
'negative_slope': 0.5
}
}
]
}
def test_lrn(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0], slope=0.5)
class TestReshape(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.reshape'
in_shapes = [(3, 2, 3)]
out_shapes = [(3, 6)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Reshape',
'bottom': ['x'],
'top': ['y'],
'reshape_param': {
'shape': {
'dim': [3, 6]
}
}
}
]
}
def test_reshape(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0], shape=[3, 6])
class TestBatchNorm(TestCaffeFunctionBaseMock):
func_name = 'chainer.links.BatchNormalization.forward'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'BatchNorm',
'bottom': ['x'],
'top': ['y'],
'blobs': [
# For average mean.
{
'shape': {
'dim': [3],
},
'data': list(six.moves.range(3)),
},
# For average variance.
{
'shape': {
'dim': [3],
},
'data': list(six.moves.range(3)),
},
],
'batch_norm_param': {
'use_global_stats': False,
}
}
]
}
def test_batchnorm(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0], finetune=False)
class TestBatchNormUsingGlobalStats(TestCaffeFunctionBaseMock):
func_name = 'chainer.links.BatchNormalization.forward'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'BatchNorm',
'bottom': ['x'],
'top': ['y'],
'blobs': [
# For average mean.
{
'shape': {
'dim': [3],
},
'data': list(six.moves.range(3)),
},
# For average variance.
{
'shape': {
'dim': [3],
},
'data': list(six.moves.range(3)),
},
],
'batch_norm_param': {
'use_global_stats': True,
}
}
]
}
def test_batchnorm(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0], finetune=False)
class TestEltwiseProd(TestCaffeFunctionBaseMock):
func_name = 'chainer.variable.Variable.__mul__'
in_shapes = [(2, 3), (2, 3), (2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Eltwise',
'bottom': ['x1', 'x2', 'x3'],
'top': ['y'],
'eltwise_param': {
'operation': 0, # PROD
},
}
]
}
def test_eltwise_prod(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x1', 'x2', 'x3'], ['y'])
self.mock.assert_has_calls([mock.call(self.inputs[1]),
mock.call(self.inputs[2])])
class TestEltwiseSum(TestCaffeFunctionBaseMock):
func_name = 'chainer.variable.Variable.__add__'
in_shapes = [(2, 3), (2, 3), (2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Eltwise',
'bottom': ['x1', 'x2', 'x3'],
'top': ['y'],
'eltwise_param': {
'operation': 1, # SUM
},
}
]
}
def test_eltwise_sum(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x1', 'x2', 'x3'], ['y'])
self.mock.assert_has_calls([mock.call(self.inputs[1]),
mock.call(self.inputs[2])])
class TestEltwiseSumCoeff(TestCaffeFunctionBaseMock):
func_name = 'chainer.variable.Variable.__add__'
in_shapes = [(2, 3), (2, 3), (2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Eltwise',
'bottom': ['x1', 'x2', 'x3'],
'top': ['y'],
'eltwise_param': {
'operation': 1, # SUM
'coeff': list(six.moves.range(3)),
},
}
]
}
def test_eltwise_sum(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x1', 'x2', 'x3'], ['y'])
self.assertEqual(self.mock.call_count, 2)
class TestEltwiseSumInvalidCoeff(TestCaffeFunctionBaseMock):
func_name = 'chainer.variable.Variable.__add__'
in_shapes = [(2, 3), (2, 3), (2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Eltwise',
'bottom': ['x1', 'x2', 'x3'],
'top': ['y'],
'eltwise_param': {
'operation': 1, # SUM
# not same as number of bottoms
'coeff': list(six.moves.range(2)),
},
}
]
}
def test_eltwise_sum(self):
self.init_func()
with self.assertRaises(AssertionError):
self.call(['x1', 'x2', 'x3'], ['y'])
class TestEltwiseMax(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.maximum'
in_shapes = [(2, 3), (2, 3), (2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Eltwise',
'bottom': ['x1', 'x2', 'x3'],
'top': ['y'],
'eltwise_param': {
'operation': 2, # MAX
},
}
]
}
def test_eltwise_max(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x1', 'x2', 'x3'], ['y'])
self.mock.assert_has_calls(
[mock.call(self.inputs[0], self.inputs[1]),
mock.call(self.outputs[0], self.inputs[2])])
class TestScale(TestCaffeFunctionBaseMock):
func_name = 'chainer.links.Scale.forward'
in_shapes = [(2, 3), (2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Scale',
'bottom': ['x', 'y'],
'top': ['z'],
'scale_param': {
'axis': 0,
}
}
]
}
def test_scale(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x', 'y'], ['z'])
self.mock.assert_called_once_with(self.inputs[0], self.inputs[1])
class TestScaleOneBottom(TestCaffeFunctionBaseMock):
func_name = 'chainer.links.Scale.forward'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Scale',
'bottom': ['x'],
'top': ['y'],
'blobs': [
{
'shape': {
'dim': [2, 3],
},
'data': list(six.moves.range(6)),
}
],
'scale_param': {
'axis': 0,
}
}
]
}
def test_scale_one_bottom(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestScaleWithBias(TestCaffeFunctionBaseMock):
func_name = 'chainer.links.Scale.forward'
in_shapes = [(2, 3), (2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Scale',
'bottom': ['x', 'y'],
'top': ['z'],
'blobs': [
{
'shape': {
'dim': [2, 3],
},
'data': list(six.moves.range(6)),
}
],
'scale_param': {
'axis': 0,
'bias_term': True,
}
}
]
}
def test_scale_with_bias(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.assertTrue(hasattr(self.func.l1, 'bias'))
self.call(['x', 'y'], ['z'])
self.mock.assert_called_once_with(self.inputs[0], self.inputs[1])
class TestScaleOneBottomWithBias(TestCaffeFunctionBaseMock):
func_name = 'chainer.links.Scale.forward'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Scale',
'bottom': ['x'],
'top': ['y'],
'blobs': [
# For W parameter.
{
'shape': {
'dim': [2, 3],
},
'data': list(six.moves.range(6)),
},
# For bias.
{
'shape': {
'dim': [2, 3],
},
'data': list(six.moves.range(6)),
}
],
'scale_param': {
'axis': 0,
'bias_term': True,
}
}
]
}
def test_scale_one_bottom_with_bias(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.assertTrue(hasattr(self.func.l1, 'bias'))
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestSlice(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.split_axis'
in_shapes = [(3, 4, 3)]
out_shapes = [(3, 2, 3), (3, 2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Slice',
'bottom': ['x'],
'top': ['y1', 'y2'],
'slice_param': {
'axis': 1
}
}
]
}
def test_slice(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y1', 'y2'])
self.mock.assert_called_once_with(
self.inputs[0],
indices_or_sections=2,
axis=1
)
class TestSliceNoAxis(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.split_axis'
in_shapes = [(4, 6, 4)]
out_shapes = [(2, 6, 4), (2, 6, 4)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Slice',
'bottom': ['x'],
'top': ['y1', 'y2'],
'slice_param': {
'slice_dim': 0
}
}
]
}
def test_slice_no_axis(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y1', 'y2'])
self.mock.assert_called_once_with(
self.inputs[0],
indices_or_sections=2,
axis=0
)
class TestSliceNoAxisNoSliceDim(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.split_axis'
in_shapes = [(4, 6, 4)]
out_shapes = [(4, 3, 4), (4, 3, 4)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Slice',
'bottom': ['x'],
'top': ['y1', 'y2'],
}
]
}
def test_slice_no_axis_no_slice_dim(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y1', 'y2'])
self.mock.assert_called_once_with(
self.inputs[0],
indices_or_sections=2,
axis=1
)
class TestSliceSlicePoint(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.split_axis'
in_shapes = [(4, 8, 6)]
out_shapes = [(4, 3, 6), (4, 2, 6), (4, 3, 6)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Slice',
'bottom': ['x'],
'top': ['y1', 'y2', 'y3'],
'slice_param': {
'axis': 1,
'slice_point': [3, 5]
}
}
]
}
def test_slice_slice_point(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y1', 'y2', 'y3'])
self.mock.assert_called_once_with(
self.inputs[0],
indices_or_sections=[3, 5],
axis=1
)
class TestSigmoid(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.sigmoid'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Sigmoid',
'bottom': ['x'],
'top': ['y'],
}
]
}
def test_sigmoid(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestSoftmax(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.softmax'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Softmax',
'bottom': ['x'],
'top': ['y'],
}
]
}
def test_softmax(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestSoftmaxCaffeEngine(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.softmax'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Softmax',
'softmax_param': {
'engine': 1, # CAFFE
},
'bottom': ['x'],
'top': ['y'],
}
]
}
def test_softmax_caffe_engine(self):
# TODO(beam2d): Check if the mock is called with
# chainer.config.use_cudnn == False
self.init_func()
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestSoftmaxcuDnnEngine(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.softmax'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Softmax',
'softmax_param': {
'engine': 2, # CUDNN
},
'bottom': ['x'],
'top': ['y'],
}
]
}
def test_softmax_cuDNN_engine(self):
# TODO(beam2d): Check if the mock is called with
# chainer.config.use_cudnn == True
self.init_func()
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestSoftmaxInvalidAxis(TestCaffeFunctionBase):
data = {
'layer': [
{
'name': 'l1',
'type': 'Softmax',
'softmax_param': {
'axis': 0, # invalid axis
}
}
]
}
def test_softmax_invalid_axis(self):
with self.assertRaises(RuntimeError):
self.init_func()
class TestSoftmaxWithLoss(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.softmax_cross_entropy'
in_shapes = [(3, 2, 3)]
out_shapes = [()]
data = {
'layer': [
{
'name': 'l1',
'type': 'SoftmaxWithLoss',
'bottom': ['x'],
'top': ['y'],
}
]
}
def test_softmax_with_loss(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestSoftmaxWithLossInvalidAxis(TestCaffeFunctionBase):
data = {
'layer': [
{
'name': 'l1',
'type': 'SoftmaxWithLoss',
'softmax_param': {
'axis': 0, # invalid axis
}
}
]
}
def test_softmax_with_loss_invalid_axis(self):
with self.assertRaises(RuntimeError):
self.init_func()
class TestSplit(TestCaffeFunctionBase):
data = {
'layer': [
{
'name': 'l1',
'type': 'Split',
'bottom': ['x'],
'top': ['y', 'z'],
}
]
}
def test_split(self):
self.init_func()
self.assertEqual(self.func.split_map, {'y': 'x', 'z': 'x'})
testing.run_module(__name__, __file__)
| mit | -7,287,609,699,628,264,000 | 25.170641 | 79 | 0.410239 | false | 3.856908 | true | false | false |
zstackio/zstack-woodpecker | tools/trans/resource.py | 1 | 25317 | # encoding=utf-8
RUNNING = "RUNNING"
STOPPED = "STOPPED"
DELETED = "DELETED"
EXPUNGED = "EXPUNGED"
ATTACHED = "ATTACHED"
DETACHED = "DETACHED"
ENABLED = "ENABLED"
HA = "HA"
DATA = "'data'"
ROOT = "'root'"
VM = "'VM'"
VOLUME = "'VOLUME'"
QCOW2 = "'qcow2'"
ISO = "'iso'"
MINI = False
# todo
LOCAL = False
class resource_dict(object):
def __init__(self):
self.running = []
self.enabled = []
self.stopped = []
self.attached = []
self.detached = []
self.deleted = []
self.expunged = []
self.ha = []
self.len = 0
self.group = {}
def __repr__(self):
temp = "Running:"
temp += str(self.running)
temp += "\nStopped:"
temp += str(self.stopped)
temp += "\nEnadbled:"
temp += str(self.enabled)
temp += "\nattached:"
temp += str(self.attached)
temp += "\nDetached:"
temp += str(self.detached)
temp += "\nDeleted:"
temp += str(self.deleted)
temp += "\nExpunged:"
temp += str(self.expunged)
temp += "\nHa:"
temp += str(self.ha)
temp += "\nGroup:"
for k, v in self.group.items():
temp += ("\n\t%s:%s") % (str(k), str(v))
temp += "---%s" % v[0].description
return temp
def __str__(self):
return self.__repr__()
def __add__(self, other):
resource = resource_dict()
resource.group = dict(self.group, **other.group)
resource.running = self.running + other.running
resource.enabled = self.enabled + other.enabled
resource.stopped = self.stopped + other.stopped
resource.attached = self.attached + other.attached
resource.detached = self.detached + other.detached
resource.deleted = self.deleted + other.deleted
resource.expunged = self.expunged + other.expunged
resource.ha = self.ha + other.ha
resource.len = self.len + other.len
return resource
def change_state(self, resource, src_sdt=None, dst_sdt=None):
if src_sdt == RUNNING:
self.running.remove(resource)
elif src_sdt == ENABLED:
self.enabled.remove(resource)
elif src_sdt == ATTACHED:
self.attached.remove(resource)
elif src_sdt == DETACHED:
self.detached.remove(resource)
elif src_sdt == STOPPED:
self.stopped.remove(resource)
elif src_sdt == DELETED:
self.deleted.remove(resource)
elif src_sdt == HA:
self.ha.remove(resource)
if dst_sdt == RUNNING:
self.running.append(resource)
elif dst_sdt == ENABLED:
self.enabled.append(resource)
elif dst_sdt == ATTACHED:
self.attached.append(resource)
elif dst_sdt == DETACHED:
self.detached.append(resource)
elif dst_sdt == STOPPED:
self.stopped.append(resource)
elif dst_sdt == DELETED:
self.deleted.append(resource)
elif dst_sdt == EXPUNGED:
self.expunged.append(resource)
elif dst_sdt == HA:
self.ha.append(resource)
def add(self, resource, dst_sdt=None):
if dst_sdt == RUNNING:
self.running.append(resource)
elif dst_sdt == STOPPED:
self.stopped.append(resource)
elif dst_sdt == ATTACHED:
self.attached.append(resource)
elif dst_sdt == ENABLED:
self.enabled.append(resource)
elif dst_sdt == DETACHED:
self.detached.append(resource)
self.len += 1
def get_not_ha_resource(self):
r_list = []
for resource in self.running:
if resource not in self.ha:
r_list.append(resource)
return r_list
all_volumes = resource_dict()
all_vms = resource_dict()
all_snapshots = resource_dict()
all_backups = resource_dict()
all_images = resource_dict()
def reset():
global all_volumes
global all_vms
global all_snapshots
global all_backups
global all_images
all_volumes = resource_dict()
all_vms = resource_dict()
all_snapshots = resource_dict()
all_backups = resource_dict()
all_images = resource_dict()
class Resource(object):
def __init__(self, name=None, type=None):
print "Resource %s has been created" % self.name
def __repr__(self):
return self.name
def change_state(self, state):
print "Resource [%s] changes state [%s] to [%s]" % (self.name, self.state, state)
self.state = state
def do_change(self, state, action_name):
print "Resource [%s] must changes state [%s] to [%s] to do [%s]" % (self.name, self.state, state, action_name)
pass
class Vm(Resource):
def __init__(self, name=None):
self.state = RUNNING
if not name:
self.name = "'vm" + str(all_vms.len + 1) + "'"
else:
self.name = name
self.root_name = self.name[:-1] + "-root'"
self.haveHA = False
self.volumes = []
self.backups = []
self.snapshots = []
self.root_volume = Volume(self.root_name, type=ROOT)
self.root_volume.vm = self
super(Vm, self).__init__()
def start(self):
all_vms.change_state(self, self.state, RUNNING)
self.state = RUNNING
return "[TestAction.start_vm, %s]" % self.name
def stop(self):
all_vms.change_state(self, self.state, STOPPED)
self.state = STOPPED
return "[TestAction.stop_vm, %s]" % self.name
def delete(self):
all_vms.change_state(self, self.state, DELETED)
self.state = DELETED
if self.haveHA:
all_vms.change_state(self, src_sdt=HA)
self.haveHA = not self.haveHA
for volume in self.volumes:
all_volumes.change_state(volume, ATTACHED, DETACHED)
volume.state = DETACHED
volume.vm = None
self.volumes = []
return "[TestAction.destroy_vm, %s]" % self.name
def expunge(self):
all_vms.change_state(self, self.state, EXPUNGED)
self.state = EXPUNGED
for snapshot in self.snapshots:
all_snapshots.change_state(snapshot, ENABLED, DELETED)
snapshot.state = DELETED
self.snapshots.remove(snapshot)
groupId = snapshot.groupId
if snapshot.groupId:
for snap in all_snapshots.group[snapshot.groupId]:
snap.groupId = None
all_snapshots.group.pop(groupId)
return "[TestAction.expunge_vm, %s]" % self.name
def recover(self):
all_vms.change_state(self, self.state, STOPPED)
self.state = STOPPED
return "[TestAction.recover_vm, %s]" % self.name
def change_ha(self):
if self.haveHA:
all_vms.change_state(self, src_sdt=HA)
else:
all_vms.change_state(self, dst_sdt=HA)
if self.state == STOPPED:
all_vms.change_state(self, self.state, RUNNING)
self.haveHA = not self.haveHA
self.state = RUNNING
return "[TestAction.change_vm_ha, %s]" % self.name
def create(self, tags=None):
all_vms.add(self, RUNNING)
self.state = RUNNING
if tags and "'data_volume=true'" in tags:
volume = Volume("'auto-volume" + str(all_vms.len) + "'")
self.volumes.append(volume)
volume.vm = self
all_volumes.add(volume, ATTACHED)
volume.state = ATTACHED
if MINI:
return "[TestAction.create_mini_vm, %s, %s]" % (self.name, ", ".join(tags))
return "[TestAction.create_vm, %s, %s]" % (self.name, ", ".join(tags))
def reinit(self):
return "[TestAction.reinit_vm, %s]" % self.name
def change_vm_image(self):
return "[TestAction.change_vm_image, %s]" % (self.name)
def migrate(self):
return "[TestAction.migrate_vm, %s]" % self.name
def resize(self, tags):
if not tags:
return "[TestAction.resize_volume, %s, 5*1024*1024]" % self.name
else:
return "[TestAction.resize_volume, %s, %s]" % (self.name, ", ".join(tags))
def clone_vm(self):
new_vm = Vm()
all_vms.add(new_vm, RUNNING)
return "[TestAction.clone_vm, %s, %s]" % (self.name, new_vm.name)
def clone_vm_with_volume(self):
new_vm = Vm()
all_vms.add(new_vm, RUNNING)
for volume in self.volumes:
name = "'clone@" + volume.name[1:]
new_volume = Volume(name)
all_volumes.add(new_volume, ATTACHED)
new_volume.state = ATTACHED
new_volume.vm = new_vm
new_vm.volumes.append(new_volume)
return "[TestAction.clone_vm, %s, %s, 'full']" % (self.name, new_vm.name)
def reboot(self):
return "[TestAction.reboot_vm, %s]" % self.name
def ps_migrate(self):
return "[TestAction.ps_migrate_vm, %s]" % self.name
def create_root_snapshot(self):
# name: vm1-root-snapshot-1
snapshot_name = self.root_name[:-1] + "-snapshot" + str(all_snapshots.len + 1) + "'"
snapshot = Snapshot(snapshot_name)
snapshot.create()
snapshot.set_volume(self.root_volume)
self.snapshots.append(snapshot)
return "[TestAction.create_volume_snapshot, %s, %s]" % (self.root_name, snapshot.name)
def delete_root_snapshot(self, snapshot):
self.snapshots.remove(snapshot)
if snapshot.groupId and all_snapshots.group.has_key(snapshot.groupId):
groupId = snapshot.groupId
for snap in all_snapshots.group[snapshot.groupId]:
snap.groupId = None
all_snapshots.group.pop(groupId)
return snapshot.delete()
def use_root_snapshot(self, snapshot):
return snapshot.use()
def create_vm_snapshot(self):
temp = 0
for key in all_snapshots.group.keys():
if int(key[-1]) > temp:
temp = int(key[-1])
groupId = "vm_snap" + str(temp + 1)
description = (self.name + "@" + "_".join([vol.name for vol in self.volumes])).replace("'", "")
all_snapshots.group[groupId] = []
root_snapshot_name = self.name[:-1] + "-snapshot" + str(all_snapshots.len + 1) + "'"
root_snapshot = Snapshot(root_snapshot_name)
root_snapshot.create()
root_snapshot.set_volume(self.root_volume)
root_snapshot.set_groupId(groupId, description)
self.snapshots.append(root_snapshot)
for volume in self.volumes:
snapshot_name = root_snapshot_name.replace(self.name[:-1], volume.name[:-1])
vol_snapshot = Snapshot(snapshot_name)
vol_snapshot.create()
vol_snapshot.set_volume(volume)
vol_snapshot.set_groupId(groupId, description)
return "[TestAction.create_vm_snapshot, %s, %s]" % (self.name, root_snapshot.name)
def delete_vm_snapshot(self, groupId):
vm_snapshot_name = ''
for snapshot in self.root_volume.snapshots:
if snapshot.groupId == groupId:
self.snapshots.remove(snapshot)
for vol_snapshot in all_snapshots.group[groupId]:
all_snapshots.change_state(vol_snapshot, vol_snapshot.state, DELETED)
if vol_snapshot.volume and vol_snapshot in vol_snapshot.volume.snapshots:
vol_snapshot.volume.snapshots.remove(vol_snapshot)
if vol_snapshot in self.snapshots:
self.snapshots.remove(vol_snapshot)
vol_snapshot.volume = None
vol_snapshot.vm = None
vm_snapshot_name = snapshot.name
for snap in all_snapshots.group[groupId]:
snap.groupId = None
all_snapshots.group.pop(groupId)
return "[TestAction.delete_vm_snapshot, %s]" % vm_snapshot_name
def use_vm_snapshot(self, groupId):
vm_snapshot_name = ''
for snapshot in self.root_volume.snapshots:
if snapshot.groupId == groupId:
vm_snapshot_name = snapshot.name
return "[TestAction.use_vm_snapshot, %s]" % vm_snapshot_name
def create_root_backup(self):
# name: vm1-root-backup-1
backup_name = self.root_name[:-1] + "-backup" + str(all_backups.len + 1) + "'"
backup = Backup(backup_name)
backup.create()
backup.set_volume(self.root_volume)
self.backups.append(backup)
return "[TestAction.create_volume_backup, %s, %s]" % (self.root_name, backup.name)
def delete_root_backup(self, backup):
self.backups.remove(backup)
if backup.groupId and all_backups.group.has_key(backup.groupId):
groupId = backup.groupId
for back in all_backups.group[backup.groupId]:
back.groupId = None
all_backups.group.pop(groupId)
return backup.delete()
def use_root_backup(self, backup):
return backup.use()
def create_vm_backup(self):
groupId = "vm_backup" + str(len(all_backups.group) + 1)
description = (self.name + "@" + "_".join([vol.name for vol in self.volumes])).replace("'", "")
all_backups.group[groupId] = []
root_backup_name = self.name[:-1] + "-backup" + str(all_backups.len + 1) + "'"
root_backup = Backup(root_backup_name)
root_backup.create()
root_backup.set_volume(self.root_volume)
root_backup.set_groupId(groupId, description)
self.backups.append(root_backup)
for volume in self.volumes:
backup_name = root_backup_name.replace(self.name[:-1], volume.name[:-1])
vol_backup = Backup(backup_name)
vol_backup.create()
vol_backup.set_volume(volume)
vol_backup.set_groupId(groupId, description)
return "[TestAction.create_vm_backup, %s, %s]" % (self.name, root_backup.name)
def delete_vm_backup(self, groupId):
vm_backup_name = ''
for backup in self.root_volume.backups:
if backup.groupId == groupId:
self.backups.remove(backup)
for vol_backup in all_backups.group[groupId]:
all_backups.change_state(vol_backup, vol_backup.state, DELETED)
vol_backup.volume.backups.remove(vol_backup)
vol_backup.volume = None
vm_backup_name = backup.name
for back in all_backups.group[groupId]:
back.groupId = None
all_backups.group.pop(groupId)
return "[TestAction.delete_vm_backup, %s]" % vm_backup_name
def use_vm_backup(self, groupId):
vm_backup_name = ''
for backup in self.root_volume.backups:
if backup.groupId == groupId:
vm_backup_name = backup.name
return "[TestAction.use_vm_backup, %s]" % vm_backup_name
def create_image(self):
image = Image(self.name)
image.type = ROOT
all_images.add(image, ENABLED)
return "[TestAction.create_image_from_volume, %s, %s]" % (self.name, image.name)
class Volume(Resource):
def __init__(self, name=None, type=DATA):
if not name:
self.name = "'volume" + str(all_volumes.len + 1) + "'"
else:
self.name = name
Resource.__init__(self)
self.state = DETACHED
self.vm = None
self.backups = []
self.snapshots = []
self.type = type
def create(self, tags):
all_volumes.add(self, DETACHED)
self.state = DETACHED
if tags and "flag" in tags[-1]:
tags[-1] = tags[-1][:-1] + ",scsi" + "'"
elif not tags or "flag" not in tags[-1]:
tags.append("'flag=scsi'")
return "[TestAction.create_volume, %s, %s]" % (self.name, ", ".join(tags))
def attach(self, vm):
all_volumes.change_state(self, self.state, ATTACHED)
self.state = ATTACHED
vm.volumes.append(self)
self.vm = vm
return "[TestAction.attach_volume, %s, %s]" % (vm.name, self.name)
def detach(self):
all_volumes.change_state(self, self.state, DETACHED)
self.state = DETACHED
self.vm.volumes.remove(self)
self.vm = None
return "[TestAction.detach_volume, %s]" % self.name
def resize(self, tags):
if not tags:
return "[TestAction.resize_data_volume, %s, 5*1024*1024]" % self.name
else:
return "[TestAction.resize_data_volume, %s, %s]" % (self.name, ", ".join(tags))
def delete(self):
all_volumes.change_state(self, self.state, DELETED)
self.state = DELETED
if self.vm:
self.vm.volumes.remove(self)
self.vm = None
return "[TestAction.delete_volume, %s]" % self.name
def expunge(self):
all_volumes.change_state(self, self.state, EXPUNGED)
self.state = EXPUNGED
for snapshot in self.snapshots:
all_snapshots.change_state(snapshot, ENABLED, DELETED)
snapshot.state = DELETED
return "[TestAction.expunge_volume, %s]" % self.name
def recover(self):
all_volumes.change_state(self, self.state, DETACHED)
self.state = DETACHED
return "[TestAction.recover_volume, %s]" % self.name
def ps_migrate(self):
return "[TestAction.ps_migrate_volume, %s]" % self.name
def migrate(self):
return "[TestAction.migrate_volume, %s]" % self.name
def create_volume_snapshot(self):
# name: volume1-snapshot1
snapshot_name = self.name[:-1] + "-snapshot" + str(all_snapshots.len + 1) + "'"
snapshot = Snapshot(snapshot_name)
snapshot.create()
self.snapshots.append(snapshot)
snapshot.volume = self
return "[TestAction.create_volume_snapshot, %s, %s]" % (self.name, snapshot.name)
def delete_volume_snapshot(self, snapshot):
self.snapshots.remove(snapshot)
return snapshot.delete()
def use_volme_snapshot(self, snapshot):
return snapshot.use()
def create_volume_backup(self):
backup_name = self.name[:-1] + "-backup" + str(all_backups.len + 1) + "'"
backup = Backup(backup_name)
backup.create()
self.backups.append(backup)
backup.volume = self
return "[TestAction.create_volume_backup, %s, %s]" % (self.name, backup.name)
def delete_volume_backup(self, backup):
self.backups.remove(backup)
return backup.delete()
def use_volume_backup(self, backup):
return backup.use()
def create_image(self):
image = Image(self.name)
image.type = DATA
all_images.add(image, ENABLED)
return "[TestAction.create_data_vol_template_from_volume, %s, %s]" % (self.name, image.name)
class Snapshot(Resource):
def __init__(self, name):
self.name = name
self.state = ENABLED
self.volume = None
self.groupId = None
self.description = ""
Resource.__init__(self)
def create(self):
all_snapshots.add(self, ENABLED)
def set_groupId(self, groupId, description=None):
self.groupId = groupId
self.description = description
all_snapshots.group[groupId].append(self)
def set_volume(self, volume):
self.volume = volume
volume.snapshots.append(self)
def delete(self):
all_snapshots.change_state(self, self.state, DELETED)
self.state = DELETED
self.volume = None
self.vm = None
return "[TestAction.delete_volume_snapshot, %s]" % self.name
def use(self):
return "[TestAction.use_volume_snapshot, %s]" % self.name
def detach_vm_snapshot(self):
for snap in all_snapshots.group[self.groupId]:
snap.groupId = None
all_snapshots.group.pop(self.groupId)
return "[TestAction.ungroup_volume_snapshot, %s]" % self.name
def create_image(self):
image_name = self.name.split('-')[0] + "'"
image = Image(image_name)
if "vm" in self.name:
image.type = ROOT
else:
image.type = DATA
all_images.add(image, ENABLED)
return "[TestAction.create_image_from_snapshot, %s, %s]" % (self.name, image.name)
class Backup(Resource):
def __init__(self, name):
self.name = name
self.state = ENABLED
self.volume = None
self.groupId = None
self.description = ""
Resource.__init__(self)
def create(self):
all_backups.add(self, ENABLED)
def set_groupId(self, groupId, description=None):
self.groupId = groupId
self.description = description
all_backups.group[groupId].append(self)
def set_volume(self, volume):
self.volume = volume
volume.backups.append(self)
def delete(self):
all_backups.change_state(self, self.state, DELETED)
self.state = DELETED
self.volume = None
return "[TestAction.delete_volume_backup, %s]" % self.name
def use(self):
return "[TestAction.use_volume_backup, %s]" % self.name
def create_vm(self):
vm = None
volumes = []
for res in all_backups.group[self.groupId]:
new_name = res.name.split("-")[0] + "-from-" + res.name.split("-")[1]
if "vm" in res.name:
vm = Vm(new_name)
all_vms.add(vm, RUNNING)
else:
volume = Volume(new_name)
all_volumes.add(volume, ATTACHED)
volumes.append(volume)
vm.volumes = volumes
for volume in volumes:
volume.vm = vm
return "[TestAction.create_vm_from_vmbackup, %s]" % self.name
def create_image(self):
image_name = self.name.split('-')[0] + "'"
image = Image(image_name)
if "vm" in self.name:
image.type = ROOT
else:
image.type = DATA
all_images.add(image, ENABLED)
return "[TestAction.create_image_from_backup, %s, %s]" % (self.name, image.name)
class Image(Resource):
def __init__(self, name=None):
if not name:
self.name = "'image" + str(all_images.len + 1) + "'"
else:
self.name = name[:-1] + "-image" + str(all_images.len + 1) + "'"
self.state = ENABLED
self.volume = None
self.groupId = None
self.type = ROOT
self.format = QCOW2
Resource.__init__(self)
def add(self, type = ROOT, url="'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'"):
all_images.add(self, ENABLED)
if self.format == ISO:
url = "os.environ.get('isoForVmUrl')"
return "[TestAction.add_image, %s, %s, %s]" % (self.name, type, url)
def delete(self):
all_images.change_state(self, self.state, DELETED)
self.state = DELETED
return "[TestAction.delete_image, %s]" % self.name
def recover(self):
all_images.change_state(self, self.state, ENABLED)
self.state = ENABLED
return "[TestAction.recover_image, %s]" % self.name
def expunge(self):
all_images.change_state(self, self.state, EXPUNGED)
self.state = EXPUNGED
return "[TestAction.expunge_image, %s]" % self.name
def create_vm(self, tags):
vm = Vm()
all_vms.add(vm, RUNNING)
return "[TestAction.create_vm_by_image, %s, %s, %s]" % (self.name, self.format, vm.name)
def create_volume(self):
# todo: mini robot action must support this function
volume = Volume()
all_volumes.add(volume, DETACHED)
# return [TestAction.create_data_volume_from_image, "volume2", "=scsi"],
return "[TestAction.create_volume_from_image, %s, %s]" % (self.name, volume.name)
def batch_delete_snapshot(snapshots):
for snap in snapshots:
all_snapshots.change_state(snap, snap.state, DELETED)
if snap.groupId and all_snapshots.group.has_key(snap.groupId):
groupId = snap.groupId
for _snap in all_snapshots.group[snap.groupId]:
_snap.groupId = None
all_snapshots.group.pop(groupId)
if "vm" in snap.name:
snap.volume.vm.snapshots.remove(snap)
else:
snap.volume.snapshots.remove(snap)
return ("[TestAction.batch_delete_volume_snapshot, [" + "%s," * len(snapshots) + "]]") % tuple([i.name for i in snapshots])
if __name__ == "__main__":
vm1 = Vm()
print vm1.create(["'data_volume=true'"])
vm2 = Vm()
print vm2.create([])
vol1 = Volume()
print vol1.create([])
# vol2 = Volume()
# print vol2.create([])
# vol3 = Volume()
# print vol3.create([])
print vol1.attach(vm1)
print vm1.delete()
# print vol3.attach(vm1)
# print vol2.attach(vm2)
#
#
# print vol1.create_volume_snapshot()
# print vm1.create_vm_snapshot()
# print vm2.create_vm_snapshot()
print vol1.create_volume_backup()
print vm1.create_vm_backup()
print vm2.create_root_backup()
all_resources = all_vms + all_volumes + all_snapshots + all_backups + all_images
print all_resources
| apache-2.0 | -8,923,569,609,837,443,000 | 33.444898 | 127 | 0.576846 | false | 3.582932 | true | false | false |
arielbro/HarmoniTag | data_acquisition/chords/sources/chordie.py | 1 | 1894 | from bs4 import BeautifulSoup
from urllib2 import urlopen
BASE_URL = "http://www.chordie.com"
def make_soup(url):
html = urlopen(url).read()
return BeautifulSoup(html, "lxml")
def get_url(option):
try:
line = option.findAll("tr")[1].findAll("td")[1].i.a
if line.span.text=="CHORDS":
return BASE_URL+ line["href"]
else: return None
except:
return None
"""
Chordie is a based on a chord search engine - hence it contains several results for each song.
We filter out the non-Chord type of results, and parse each of the remaining into a chord vector.
Returns an array containing all available chord vectors.
"""
def get_chord_options(chords_url):
soup = make_soup(chords_url)
all_text = soup.find("div", {"id": "resultc"})
if not all_text: return None
all_song_chords = all_text.findAll("table")
chord_options = []
for option in all_song_chords:
url = get_url(option)
if not url==None:
chord_vector = get_chord_vector(url)
if not chord_vector==None and len(chord_vector)>0:
#print url+": "+str(chord_vector)
chord_options.append(chord_vector)
return chord_options
"""
Given a relevant url, parses it into a chord vector
"""
def get_chord_vector(chords_url):
soup = make_soup(chords_url)
song_chords = soup.findAll("td", "c")
chord_vector = [str(chord.text) for chord in song_chords]
return chord_vector
"""
Given (artist, title), scrapes *all* suiteble chord vectors
"""
def get_chords(title,artist):
name = "songtitle/"+title.replace(' ','+')+"/songartist/"+artist.replace(' ','+')+"/index.html"
all_chords_url = (BASE_URL +"/allsongs.php/"+ name)
chord_options = get_chord_options(all_chords_url)
return chord_options
def test():
get_chords("Suzanne", "leonard cohen") | gpl-2.0 | 4,818,141,216,001,924,000 | 30.065574 | 99 | 0.63622 | false | 3.282496 | false | false | false |
hashware/tflearn-learn | examples/nlp/seq2seq_example.py | 1 | 27637 | '''
Pedagogical example realization of seq2seq recurrent neural networks, using TensorFlow and TFLearn.
More info at https://github.com/ichuang/tflearn_seq2seq
'''
from __future__ import division, print_function
import os
import sys
import tflearn
import argparse
import json
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import seq2seq
from tensorflow.python.ops import rnn_cell
#-----------------------------------------------------------------------------
class SequencePattern(object):
INPUT_SEQUENCE_LENGTH = 10
OUTPUT_SEQUENCE_LENGTH = 10
INPUT_MAX_INT = 9
OUTPUT_MAX_INT = 9
PATTERN_NAME = "sorted"
def __init__(self, name=None, in_seq_len=None, out_seq_len=None):
if name is not None:
assert hasattr(self, "%s_sequence" % name)
self.PATTERN_NAME = name
if in_seq_len:
self.INPUT_SEQUENCE_LENGTH = in_seq_len
if out_seq_len:
self.OUTPUT_SEQUENCE_LENGTH = out_seq_len
def generate_output_sequence(self, x):
'''
For a given input sequence, generate the output sequence. x is a 1D numpy array
of integers, with length INPUT_SEQUENCE_LENGTH.
Returns a 1D numpy array of length OUTPUT_SEQUENCE_LENGTH
This procedure defines the pattern which the seq2seq RNN will be trained to find.
'''
return getattr(self, "%s_sequence" % self.PATTERN_NAME)(x)
def maxmin_dup_sequence(self, x):
'''
Generate sequence with [max, min, rest of original entries]
'''
x = np.array(x)
y = [ x.max(), x.min()] + list(x[2:])
return np.array(y)[:self.OUTPUT_SEQUENCE_LENGTH] # truncate at out seq len
def sorted_sequence(self, x):
'''
Generate sorted version of original sequence
'''
return np.array( sorted(x) )[:self.OUTPUT_SEQUENCE_LENGTH]
def reversed_sequence(self, x):
'''
Generate reversed version of original sequence
'''
return np.array( x[::-1] )[:self.OUTPUT_SEQUENCE_LENGTH]
#-----------------------------------------------------------------------------
class TFLearnSeq2Seq(object):
'''
seq2seq recurrent neural network, implemented using TFLearn.
'''
AVAILABLE_MODELS = ["embedding_rnn", "embedding_attention"]
def __init__(self, sequence_pattern, seq2seq_model=None, verbose=None, name=None, data_dir=None):
'''
sequence_pattern_class = a SequencePattern class instance, which defines pattern parameters
(input, output lengths, name, generating function)
seq2seq_model = string specifying which seq2seq model to use, e.g. "embedding_rnn"
'''
self.sequence_pattern = sequence_pattern
self.seq2seq_model = seq2seq_model or "embedding_rnn"
assert self.seq2seq_model in self.AVAILABLE_MODELS
self.in_seq_len = self.sequence_pattern.INPUT_SEQUENCE_LENGTH
self.out_seq_len = self.sequence_pattern.OUTPUT_SEQUENCE_LENGTH
self.in_max_int = self.sequence_pattern.INPUT_MAX_INT
self.out_max_int = self.sequence_pattern.OUTPUT_MAX_INT
self.verbose = verbose or 0
self.n_input_symbols = self.in_max_int + 1
self.n_output_symbols = self.out_max_int + 2 # extra one for GO symbol
self.model_instance = None
self.name = name
self.data_dir = data_dir
def generate_trainig_data(self, num_points):
'''
Generate training dataset. Produce random (integer) sequences X, and corresponding
expected output sequences Y = generate_output_sequence(X).
Return xy_data, y_data (both of type uint32)
xy_data = numpy array of shape [num_points, in_seq_len + out_seq_len], with each point being X + Y
y_data = numpy array of shape [num_points, out_seq_len]
'''
x_data = np.random.randint(0, self.in_max_int, size=(num_points, self.in_seq_len)) # shape [num_points, in_seq_len]
x_data = x_data.astype(np.uint32) # ensure integer type
y_data = [ self.sequence_pattern.generate_output_sequence(x) for x in x_data ]
y_data = np.array(y_data)
xy_data = np.append(x_data, y_data, axis=1) # shape [num_points, 2*seq_len]
return xy_data, y_data
def sequence_loss(self, y_pred, y_true):
'''
Loss function for the seq2seq RNN. Reshape predicted and true (label) tensors, generate dummy weights,
then use seq2seq.sequence_loss to actually compute the loss function.
'''
if self.verbose > 2: print ("my_sequence_loss y_pred=%s, y_true=%s" % (y_pred, y_true))
logits = tf.unstack(y_pred, axis=1) # list of [-1, num_decoder_synbols] elements
targets = tf.unstack(y_true, axis=1) # y_true has shape [-1, self.out_seq_len]; unpack to list of self.out_seq_len [-1] elements
if self.verbose > 2:
print ("my_sequence_loss logits=%s" % (logits,))
print ("my_sequence_loss targets=%s" % (targets,))
weights = [tf.ones_like(yp, dtype=tf.float32) for yp in targets]
if self.verbose > 4: print ("my_sequence_loss weights=%s" % (weights,))
sl = seq2seq.sequence_loss(logits, targets, weights)
if self.verbose > 2: print ("my_sequence_loss return = %s" % sl)
return sl
def accuracy(self, y_pred, y_true, x_in): # y_pred is [-1, self.out_seq_len, num_decoder_symbols]; y_true is [-1, self.out_seq_len]
'''
Compute accuracy of the prediction, based on the true labels. Use the average number of equal
values.
'''
pred_idx = tf.to_int32(tf.argmax(y_pred, 2)) # [-1, self.out_seq_len]
if self.verbose > 2: print ("my_accuracy pred_idx = %s" % pred_idx)
accuracy = tf.reduce_mean(tf.cast(tf.equal(pred_idx, y_true), tf.float32), name='acc')
return accuracy
def model(self, mode="train", num_layers=1, cell_size=32, cell_type="BasicLSTMCell", embedding_size=20, learning_rate=0.0001,
tensorboard_verbose=0, checkpoint_path=None):
'''
Build tensor specifying graph of operations for the seq2seq neural network model.
mode = string, either "train" or "predict"
cell_type = attribute of rnn_cell specifying which RNN cell type to use
cell_size = size for the hidden layer in the RNN cell
num_layers = number of RNN cell layers to use
Return TFLearn model instance. Use DNN model for this.
'''
assert mode in ["train", "predict"]
checkpoint_path = checkpoint_path or ("%s%ss2s_checkpoint.tfl" % (self.data_dir or "", "/" if self.data_dir else ""))
GO_VALUE = self.out_max_int + 1 # unique integer value used to trigger decoder outputs in the seq2seq RNN
network = tflearn.input_data(shape=[None, self.in_seq_len + self.out_seq_len], dtype=tf.int32, name="XY")
encoder_inputs = tf.slice(network, [0, 0], [-1, self.in_seq_len], name="enc_in") # get encoder inputs
encoder_inputs = tf.unstack(encoder_inputs, axis=1) # transform into list of self.in_seq_len elements, each [-1]
decoder_inputs = tf.slice(network, [0, self.in_seq_len], [-1, self.out_seq_len], name="dec_in") # get decoder inputs
decoder_inputs = tf.unstack(decoder_inputs, axis=1) # transform into list of self.out_seq_len elements, each [-1]
go_input = tf.multiply( tf.ones_like(decoder_inputs[0], dtype=tf.int32), GO_VALUE ) # insert "GO" symbol as the first decoder input; drop the last decoder input
decoder_inputs = [go_input] + decoder_inputs[: self.out_seq_len-1] # insert GO as first; drop last decoder input
feed_previous = not (mode=="train")
if self.verbose > 3:
print ("feed_previous = %s" % str(feed_previous))
print ("encoder inputs: %s" % str(encoder_inputs))
print ("decoder inputs: %s" % str(decoder_inputs))
print ("len decoder inputs: %s" % len(decoder_inputs))
self.n_input_symbols = self.in_max_int + 1 # default is integers from 0 to 9
self.n_output_symbols = self.out_max_int + 2 # extra "GO" symbol for decoder inputs
single_cell = getattr(rnn_cell, cell_type)(cell_size, state_is_tuple=True)
if num_layers==1:
cell = single_cell
else:
cell = rnn_cell.MultiRNNCell([single_cell] * num_layers)
if self.seq2seq_model=="embedding_rnn":
model_outputs, states = seq2seq.embedding_rnn_seq2seq(encoder_inputs, # encoder_inputs: A list of 2D Tensors [batch_size, input_size].
decoder_inputs,
cell,
num_encoder_symbols=self.n_input_symbols,
num_decoder_symbols=self.n_output_symbols,
embedding_size=embedding_size,
feed_previous=feed_previous)
elif self.seq2seq_model=="embedding_attention":
model_outputs, states = seq2seq.embedding_attention_seq2seq(encoder_inputs, # encoder_inputs: A list of 2D Tensors [batch_size, input_size].
decoder_inputs,
cell,
num_encoder_symbols=self.n_input_symbols,
num_decoder_symbols=self.n_output_symbols,
embedding_size=embedding_size,
num_heads=1,
initial_state_attention=False,
feed_previous=feed_previous)
else:
raise Exception('[TFLearnSeq2Seq] Unknown seq2seq model %s' % self.seq2seq_model)
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + "seq2seq_model", model_outputs) # for TFLearn to know what to save and restore
# model_outputs: list of the same length as decoder_inputs of 2D Tensors with shape [batch_size x output_size] containing the generated outputs.
if self.verbose > 2: print ("model outputs: %s" % model_outputs)
network = tf.stack(model_outputs, axis=1) # shape [-1, n_decoder_inputs (= self.out_seq_len), num_decoder_symbols]
if self.verbose > 2: print ("packed model outputs: %s" % network)
if self.verbose > 3:
all_vars = tf.get_collection(tf.GraphKeys.VARIABLES)
print ("all_vars = %s" % all_vars)
with tf.name_scope("TargetsData"): # placeholder for target variable (i.e. trainY input)
targetY = tf.placeholder(shape=[None, self.out_seq_len], dtype=tf.int32, name="Y")
network = tflearn.regression(network,
placeholder=targetY,
optimizer='adam',
learning_rate=learning_rate,
loss=self.sequence_loss,
metric=self.accuracy,
name="Y")
model = tflearn.DNN(network, tensorboard_verbose=tensorboard_verbose, checkpoint_path=checkpoint_path)
return model
def train(self, num_epochs=20, num_points=100000, model=None, model_params=None, weights_input_fn=None,
validation_set=0.1, snapshot_step=5000, batch_size=128, weights_output_fn=None):
'''
Train model, with specified number of epochs, and dataset size.
Use specified model, or create one if not provided. Load initial weights from file weights_input_fn,
if provided. validation_set specifies what to use for the validation.
Returns logits for prediction, as an numpy array of shape [out_seq_len, n_output_symbols].
'''
trainXY, trainY = self.generate_trainig_data(num_points)
print ("[TFLearnSeq2Seq] Training on %d point dataset (pattern '%s'), with %d epochs" % (num_points,
self.sequence_pattern.PATTERN_NAME,
num_epochs))
if self.verbose > 1:
print (" model parameters: %s" % json.dumps(model_params, indent=4))
model_params = model_params or {}
model = model or self.setup_model("train", model_params, weights_input_fn)
model.fit(trainXY, trainY,
n_epoch=num_epochs,
validation_set=validation_set,
batch_size=batch_size,
shuffle=True,
show_metric=True,
snapshot_step=snapshot_step,
snapshot_epoch=False,
run_id="TFLearnSeq2Seq"
)
print ("Done!")
if weights_output_fn is not None:
weights_output_fn = self.canonical_weights_fn(weights_output_fn)
model.save(weights_output_fn)
print ("Saved %s" % weights_output_fn)
self.weights_output_fn = weights_output_fn
return model
def canonical_weights_fn(self, iteration_num=0):
'''
Construct canonical weights filename, based on model and pattern names.
'''
if not type(iteration_num)==int:
try:
iteration_num = int(iteration_num)
except Exception as err:
return iteration_num
model_name = self.name or "basic"
wfn = "ts2s__%s__%s_%s.tfl" % (model_name, self.sequence_pattern.PATTERN_NAME, iteration_num)
if self.data_dir:
wfn = "%s/%s" % (self.data_dir, wfn)
self.weights_filename = wfn
return wfn
def setup_model(self, mode, model_params=None, weights_input_fn=None):
'''
Setup a model instance, using the specified mode and model parameters.
Load the weights from the specified file, if it exists.
If weights_input_fn is an integer, use that the model name, and
the pattern name, to construct a canonical filename.
'''
model_params = model_params or {}
model = self.model_instance or self.model(mode=mode, **model_params)
self.model_instance = model
if weights_input_fn:
if type(weights_input_fn)==int:
weights_input_fn = self.canonical_weights_fn(weights_input_fn)
if os.path.exists(weights_input_fn):
model.load(weights_input_fn)
print ("[TFLearnSeq2Seq] model weights loaded from %s" % weights_input_fn)
else:
print ("[TFLearnSeq2Seq] MISSING model weights file %s" % weights_input_fn)
return model
def predict(self, Xin, model=None, model_params=None, weights_input_fn=None):
'''
Make a prediction, using the seq2seq model, for the given input sequence Xin.
If model is not provided, create one (or use last created instance).
Return prediction, y
prediction = array of integers, giving output prediction. Length = out_seq_len
y = array of shape [out_seq_len, out_max_int], giving logits for output prediction
'''
if not model:
model = self.model_instance or self.setup_model("predict", model_params, weights_input_fn)
if self.verbose: print ("Xin = %s" % str(Xin))
X = np.array(Xin).astype(np.uint32)
assert len(X)==self.in_seq_len
if self.verbose:
print ("X Input shape=%s, data=%s" % (X.shape, X))
print ("Expected output = %s" % str(self.sequence_pattern.generate_output_sequence(X)))
Yin = [0]*self.out_seq_len
XY = np.append(X, np.array(Yin).astype(np.float32))
XY = XY.reshape([-1, self.in_seq_len + self.out_seq_len]) # batch size 1
if self.verbose > 1: print ("XY Input shape=%s, data=%s" % (XY.shape, XY))
res = model.predict(XY)
res = np.array(res)
if self.verbose > 1: print ("prediction shape = %s" % str(res.shape))
y = res.reshape(self.out_seq_len, self.n_output_symbols)
prediction = np.argmax(y, axis=1)
if self.verbose:
print ("Predicted output sequence: %s" % str(prediction))
return prediction, y
#-----------------------------------------------------------------------------
class VAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
curval = getattr(args, self.dest, 0) or 0
values=values.count('v')+1
setattr(args, self.dest, values + curval)
#-----------------------------------------------------------------------------
def CommandLine(args=None, arglist=None):
'''
Main command line. Accepts args, to allow for simple unit testing.
'''
help_text = """
Commands:
train - give size of training set to use, as argument
predict - give input sequence as argument (or specify inputs via --from-file <filename>)
"""
parser = argparse.ArgumentParser(description=help_text, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("cmd", help="command")
parser.add_argument("cmd_input", nargs='*', help="input to command")
parser.add_argument('-v', "--verbose", nargs=0, help="increase output verbosity (add more -v to increase versbosity)", action=VAction, dest='verbose')
parser.add_argument("-m", "--model", help="seq2seq model name: either embedding_rnn (default) or embedding_attention", default=None)
parser.add_argument("-r", "--learning-rate", type=float, help="learning rate (default 0.0001)", default=0.0001)
parser.add_argument("-e", "--epochs", type=int, help="number of trainig epochs", default=10)
parser.add_argument("-i", "--input-weights", type=str, help="tflearn file with network weights to load", default=None)
parser.add_argument("-o", "--output-weights", type=str, help="new tflearn file where network weights are to be saved", default=None)
parser.add_argument("-p", "--pattern-name", type=str, help="name of pattern to use for sequence", default=None)
parser.add_argument("-n", "--name", type=str, help="name of model, used when generating default weights filenames", default=None)
parser.add_argument("--in-len", type=int, help="input sequence length (default 10)", default=None)
parser.add_argument("--out-len", type=int, help="output sequence length (default 10)", default=None)
parser.add_argument("--from-file", type=str, help="name of file to take input data sequences from (json format)", default=None)
parser.add_argument("--iter-num", type=int, help="training iteration number; specify instead of input- or output-weights to use generated filenames", default=None)
parser.add_argument("--data-dir", help="directory to use for storing checkpoints (also used when generating default weights filenames)", default=None)
# model parameters
parser.add_argument("-L", "--num-layers", type=int, help="number of RNN layers to use in the model (default 1)", default=1)
parser.add_argument("--cell-size", type=int, help="size of RNN cell to use (default 32)", default=32)
parser.add_argument("--cell-type", type=str, help="type of RNN cell to use (default BasicLSTMCell)", default="BasicLSTMCell")
parser.add_argument("--embedding-size", type=int, help="size of embedding to use (default 20)", default=20)
parser.add_argument("--tensorboard-verbose", type=int, help="tensorboard verbosity level (default 0)", default=0)
if not args:
args = parser.parse_args(arglist)
if args.iter_num is not None:
args.input_weights = args.iter_num
args.output_weights = args.iter_num + 1
model_params = dict(num_layers=args.num_layers,
cell_size=args.cell_size,
cell_type=args.cell_type,
embedding_size=args.embedding_size,
learning_rate=args.learning_rate,
tensorboard_verbose=args.tensorboard_verbose,
)
if args.cmd=="train":
try:
num_points = int(args.cmd_input[0])
except:
raise Exception("Please specify the number of datapoints to use for training, as the first argument")
sp = SequencePattern(args.pattern_name, in_seq_len=args.in_len, out_seq_len=args.out_len)
ts2s = TFLearnSeq2Seq(sp, seq2seq_model=args.model, data_dir=args.data_dir, name=args.name, verbose=args.verbose)
ts2s.train(num_epochs=args.epochs, num_points=num_points, weights_output_fn=args.output_weights,
weights_input_fn=args.input_weights, model_params=model_params)
return ts2s
elif args.cmd=="predict":
if args.from_file:
inputs = json.loads(args.from_file)
try:
input_x = map(int, args.cmd_input)
inputs = [input_x]
except:
raise Exception("Please provide a space-delimited input sequence as the argument")
sp = SequencePattern(args.pattern_name, in_seq_len=args.in_len, out_seq_len=args.out_len)
ts2s = TFLearnSeq2Seq(sp, seq2seq_model=args.model, data_dir=args.data_dir, name=args.name, verbose=args.verbose)
results = []
for x in inputs:
prediction, y = ts2s.predict(x, weights_input_fn=args.input_weights, model_params=model_params)
print("==> For input %s, prediction=%s (expected=%s)" % (x, prediction, sp.generate_output_sequence(x)))
results.append([prediction, y])
ts2s.prediction_results = results
return ts2s
else:
print("Unknown command %s" % args.cmd)
#-----------------------------------------------------------------------------
# unit tests
def test_sp1():
'''
Test two different SequencePattern instances
'''
sp = SequencePattern("maxmin_dup")
y = sp.generate_output_sequence(range(10))
assert all(y==np.array([9, 0, 2, 3, 4, 5, 6, 7, 8, 9]))
sp = SequencePattern("sorted")
y = sp.generate_output_sequence([5,6,1,2,9])
assert all(y==np.array([1, 2, 5, 6, 9]))
sp = SequencePattern("reversed")
y = sp.generate_output_sequence(range(10))
assert all(y==np.array([9, 8, 7, 6, 5, 4, 3, 2, 1, 0]))
def test_sp2():
'''
Test two SequencePattern instance with lengths different from default
'''
sp = SequencePattern("sorted", in_seq_len=20, out_seq_len=5)
x = np.random.randint(0, 9, 20)
y = sp.generate_output_sequence(x)
assert len(y)==5
y_exp = sorted(x)[:5]
assert all(y==y_exp)
def test_train1():
'''
Test simple training of an embedding_rnn seq2seq model
'''
sp = SequencePattern()
ts2s = TFLearnSeq2Seq(sp)
ofn = "test_%s" % ts2s.canonical_weights_fn(0)
print ("using weights filename %s" % ofn)
if os.path.exists(ofn):
os.unlink(ofn)
tf.reset_default_graph()
ts2s.train(num_epochs=1, num_points=10000, weights_output_fn=ofn)
assert os.path.exists(ofn)
def test_predict1():
'''
Test simple preductions using weights just produced (in test_train1)
'''
sp = SequencePattern()
ts2s = TFLearnSeq2Seq(sp, verbose=1)
wfn = "test_%s" % ts2s.canonical_weights_fn(0)
print ("using weights filename %s" % wfn)
tf.reset_default_graph()
prediction, y = ts2s.predict(Xin=range(10), weights_input_fn=wfn)
assert len(prediction==10)
def test_train_predict2():
'''
Test that the embedding_attention model works, with saving and loading of weights
'''
import tempfile
sp = SequencePattern()
tempdir = tempfile.mkdtemp()
ts2s = TFLearnSeq2Seq(sp, seq2seq_model="embedding_attention", data_dir=tempdir, name="attention")
tf.reset_default_graph()
ts2s.train(num_epochs=1, num_points=1000, weights_output_fn=1, weights_input_fn=0)
assert os.path.exists(ts2s.weights_output_fn)
tf.reset_default_graph()
ts2s = TFLearnSeq2Seq(sp, seq2seq_model="embedding_attention", data_dir="DATA", name="attention", verbose=1)
prediction, y = ts2s.predict(Xin=range(10), weights_input_fn=1)
assert len(prediction==10)
os.system("rm -rf %s" % tempdir)
def test_train_predict3():
'''
Test that a model trained on sequencees of one length can be used for predictions on other sequence lengths
'''
import tempfile
sp = SequencePattern("sorted", in_seq_len=10, out_seq_len=10)
tempdir = tempfile.mkdtemp()
ts2s = TFLearnSeq2Seq(sp, seq2seq_model="embedding_attention", data_dir=tempdir, name="attention")
tf.reset_default_graph()
ts2s.train(num_epochs=1, num_points=1000, weights_output_fn=1, weights_input_fn=0)
assert os.path.exists(ts2s.weights_output_fn)
tf.reset_default_graph()
sp = SequencePattern("sorted", in_seq_len=20, out_seq_len=8)
tf.reset_default_graph()
ts2s = TFLearnSeq2Seq(sp, seq2seq_model="embedding_attention", data_dir="DATA", name="attention", verbose=1)
x = np.random.randint(0, 9, 20)
prediction, y = ts2s.predict(x, weights_input_fn=1)
assert len(prediction==8)
os.system("rm -rf %s" % tempdir)
def test_main1():
'''
Integration test - training
'''
import tempfile
tempdir = tempfile.mkdtemp()
arglist = "--data-dir %s -e 2 --iter-num=1 -v -v --tensorboard-verbose=1 train 5000" % tempdir
arglist = arglist.split(' ')
tf.reset_default_graph()
ts2s = CommandLine(arglist=arglist)
assert os.path.exists(ts2s.weights_output_fn)
os.system("rm -rf %s" % tempdir)
def test_main2():
'''
Integration test - training then prediction
'''
import tempfile
tempdir = tempfile.mkdtemp()
arglist = "--data-dir %s -e 2 --iter-num=1 -v -v --tensorboard-verbose=1 train 5000" % tempdir
arglist = arglist.split(' ')
tf.reset_default_graph()
ts2s = CommandLine(arglist=arglist)
wfn = ts2s.weights_output_fn
assert os.path.exists(wfn)
arglist = "-i %s predict 1 2 3 4 5 6 7 8 9 0" % wfn
arglist = arglist.split(' ')
tf.reset_default_graph()
ts2s = CommandLine(arglist=arglist)
assert len(ts2s.prediction_results[0][0])==10
os.system("rm -rf %s" % tempdir)
def test_main3():
'''
Integration test - training then prediction: attention model
'''
import tempfile
wfn = "tmp_weights.tfl"
if os.path.exists(wfn):
os.unlink(wfn)
arglist = "-e 2 -o tmp_weights.tfl -v -v -v -v -m embedding_attention train 5000"
arglist = arglist.split(' ')
tf.reset_default_graph()
ts2s = CommandLine(arglist=arglist)
assert os.path.exists(wfn)
arglist = "-i tmp_weights.tfl -v -v -v -v -m embedding_attention predict 1 2 3 4 5 6 7 8 9 0"
arglist = arglist.split(' ')
tf.reset_default_graph()
ts2s = CommandLine(arglist=arglist)
assert len(ts2s.prediction_results[0][0])==10
#-----------------------------------------------------------------------------
if __name__=="__main__":
CommandLine()
| mit | 6,505,902,849,354,478,000 | 46.081772 | 168 | 0.595036 | false | 3.756558 | true | false | false |
ArvinPan/pyzmq | examples/security/woodhouse.py | 6 | 2293 | #!/usr/bin/env python
'''
Woodhouse extends Strawhouse with a name and password check.
This uses the PLAIN mechanism which does plain-text username and password authentication).
It's not really secure, and anyone sniffing the network (trivial with WiFi)
can capture passwords and then login.
Author: Chris Laws
'''
import logging
import sys
import zmq
import zmq.auth
from zmq.auth.thread import ThreadAuthenticator
def run():
'''Run woodhouse example'''
valid_client_test_pass = False
invalid_client_test_pass = False
ctx = zmq.Context.instance()
# Start an authenticator for this context.
auth = ThreadAuthenticator(ctx)
auth.start()
auth.allow('127.0.0.1')
# Instruct authenticator to handle PLAIN requests
auth.configure_plain(domain='*', passwords={'admin': 'secret'})
server = ctx.socket(zmq.PUSH)
server.plain_server = True # must come before bind
server.bind('tcp://*:9000')
client = ctx.socket(zmq.PULL)
client.plain_username = b'admin'
client.plain_password = b'secret'
client.connect('tcp://127.0.0.1:9000')
server.send(b"Hello")
if client.poll():
msg = client.recv()
if msg == b"Hello":
valid_client_test_pass = True
client.close()
# now use invalid credentials - expect no msg received
client2 = ctx.socket(zmq.PULL)
client2.plain_username = b'admin'
client2.plain_password = b'bogus'
client2.connect('tcp://127.0.0.1:9000')
server.send(b"World")
if client2.poll(50):
msg = client.recv()
if msg == "World":
invalid_client_test_pass = False
else:
# no message is expected
invalid_client_test_pass = True
# stop auth thread
auth.stop()
if valid_client_test_pass and invalid_client_test_pass:
logging.info("Woodhouse test OK")
else:
logging.error("Woodhouse test FAIL")
if __name__ == '__main__':
if zmq.zmq_version_info() < (4,0):
raise RuntimeError("Security is not supported in libzmq version < 4.0. libzmq version {0}".format(zmq.zmq_version()))
if '-v' in sys.argv:
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(level=level, format="[%(levelname)s] %(message)s")
run()
| bsd-3-clause | 4,298,470,329,914,838,500 | 24.477778 | 125 | 0.648059 | false | 3.571651 | true | false | false |
erja-gp/openthread | tools/harness-automation/autothreadharness/harness_controller.py | 16 | 5053 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import ConfigParser
import logging
import os
import subprocess
import time
from autothreadharness import settings
logger = logging.getLogger(__name__)
HARNESS_SVN_VERSION_R44 = 1471
"""int: this is the first published release that miniweb was removed from Harness"""
def _try_kill(proc):
logger.info('Try kill process')
times = 1
while proc.poll() is None:
proc.kill()
time.sleep(5)
if proc.poll() is not None:
logger.info('Process has been killed')
break
logger.info('Trial %d failed', times)
times += 1
if times > 3:
raise SystemExit()
class HarnessController(object):
"""Harness service control
This controls harness service, including the harness back-end and front-end.
"""
harness = None
"""harness back-end"""
miniweb = None
"""harness front-end"""
def __init__(self, result_dir=None):
self.result_dir = result_dir
self.harness_file = ''
harness_info = ConfigParser.ConfigParser()
harness_info.read('%s\\info.ini' % settings.HARNESS_HOME)
self.version = harness_info.getint('Thread_Harness_Info', 'SVN')
def start(self):
logger.info('Starting harness service')
if self.harness:
logger.warning('Harness already started')
else:
env = dict(os.environ, PYTHONPATH='%s\\Thread_Harness;%s\\ThirdParty\\hsdk-python\\src'
% (settings.HARNESS_HOME, settings.HARNESS_HOME))
self.harness_file = '%s\\harness-%s.log' % (self.result_dir, time.strftime('%Y%m%d%H%M%S'))
with open(self.harness_file, 'w') as harness_out:
self.harness = subprocess.Popen([settings.HARNESS_HOME + '\\Python27\\python.exe',
settings.HARNESS_HOME + '\\Thread_Harness\\Run.py'],
cwd=settings.HARNESS_HOME,
stdout=harness_out,
stderr=harness_out,
env=env)
time.sleep(2)
if self.version >= HARNESS_SVN_VERSION_R44:
return
if self.miniweb:
logger.warning('Miniweb already started')
else:
with open('%s\\miniweb-%s.log' % (self.result_dir, time.strftime('%Y%m%d%H%M%S')), 'w') as miniweb_out:
self.miniweb = subprocess.Popen([settings.HARNESS_HOME + '\\MiniWeb\\miniweb.exe'],
stdout=miniweb_out,
stderr=miniweb_out,
cwd=settings.HARNESS_HOME + '\\MiniWeb')
def stop(self):
logger.info('Stopping harness service')
if self.harness:
_try_kill(self.harness)
self.harness = None
else:
logger.warning('Harness not started yet')
if self.version >= HARNESS_SVN_VERSION_R44:
return
if self.miniweb:
_try_kill(self.miniweb)
self.miniweb = None
else:
logger.warning('Miniweb not started yet')
def tail(self):
with open(self.harness_file) as harness_out:
harness_out.seek(-100, 2)
return ''.join(harness_out.readlines())
def __del__(self):
self.stop()
| bsd-3-clause | -7,499,389,205,181,361,000 | 36.154412 | 115 | 0.610924 | false | 4.356034 | false | false | false |
OCA/contract | agreement_legal/models/agreement.py | 1 | 14411 | # Copyright (C) 2018 - TODAY, Pavlov Media
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import _, api, fields, models
class Agreement(models.Model):
_inherit = "agreement"
# General
name = fields.Char(string="Title", required=True)
version = fields.Integer(
string="Version",
default=1,
copy=False,
help="The versions are used to keep track of document history and "
"previous versions can be referenced.")
revision = fields.Integer(
string="Revision",
default=0,
copy=False,
help="The revision will increase with every save event.")
description = fields.Text(
string="Description",
track_visibility="onchange",
help="Description of the agreement")
dynamic_description = fields.Text(
compute="_compute_dynamic_description",
string="Dynamic Description",
help="Compute dynamic description")
start_date = fields.Date(
string="Start Date",
track_visibility="onchange",
help="When the agreement starts.")
end_date = fields.Date(
string="End Date",
track_visibility="onchange",
help="When the agreement ends.")
color = fields.Integer(string="Color")
active = fields.Boolean(
string="Active",
default=True,
help="If unchecked, it will allow you to hide the agreement without "
"removing it.")
company_signed_date = fields.Date(
string="Signed on",
track_visibility="onchange",
help="Date the contract was signed by Company.")
partner_signed_date = fields.Date(
string="Signed on (Partner)",
track_visibility="onchange",
help="Date the contract was signed by the Partner.")
term = fields.Integer(
string="Term (Months)",
track_visibility="onchange",
help="Number of months this agreement/contract is in effect with the "
"partner.")
expiration_notice = fields.Integer(
string="Exp. Notice (Days)",
track_visibility="onchange",
help="Number of Days before expiration to be notified.")
change_notice = fields.Integer(
string="Change Notice (Days)",
track_visibility="onchange",
help="Number of Days to be notified before changes.")
special_terms = fields.Text(
string="Special Terms",
track_visibility="onchange",
help="Any terms that you have agreed to and want to track on the "
"agreement/contract.")
dynamic_special_terms = fields.Text(
compute="_compute_dynamic_special_terms",
string="Dynamic Special Terms",
help="Compute dynamic special terms")
code = fields.Char(
string="Reference",
required=True,
default=lambda self: _("New"),
track_visibility="onchange",
copy=False,
help="ID used for internal contract tracking.")
increase_type_id = fields.Many2one(
"agreement.increasetype",
string="Increase Type",
track_visibility="onchange",
help="The amount that certain rates may increase.")
termination_requested = fields.Date(
string="Termination Requested Date",
track_visibility="onchange",
help="Date that a request for termination was received.")
termination_date = fields.Date(
string="Termination Date",
track_visibility="onchange",
help="Date that the contract was terminated.")
reviewed_date = fields.Date(
string="Reviewed Date", track_visibility="onchange")
reviewed_user_id = fields.Many2one(
"res.users", string="Reviewed By", track_visibility="onchange")
approved_date = fields.Date(
string="Approved Date", track_visibility="onchange")
approved_user_id = fields.Many2one(
"res.users", string="Approved By", track_visibility="onchange")
currency_id = fields.Many2one("res.currency", string="Currency")
partner_id = fields.Many2one(
"res.partner",
string="Partner",
required=False,
copy=True,
help="The customer or vendor this agreement is related to.")
partner_contact_id = fields.Many2one(
"res.partner",
string="Partner Contact",
copy=True,
help="The primary partner contact (If Applicable).")
partner_contact_phone = fields.Char(
related="partner_contact_id.phone", string="Partner Phone")
partner_contact_email = fields.Char(
related="partner_contact_id.email", string="Partner Email")
company_contact_id = fields.Many2one(
"res.partner",
string="Company Contact",
copy=True,
help="The primary contact in the company.")
company_contact_phone = fields.Char(
related="company_contact_id.phone", string="Phone")
company_contact_email = fields.Char(
related="company_contact_id.email", string="Email")
use_parties_content = fields.Boolean(
string="Use parties content",
help="Use custom content for parties")
company_partner_id = fields.Many2one(
related="company_id.partner_id", string="Company's Partner")
def _get_default_parties(self):
deftext = """
<h3>Company Information</h3>
<p>
${object.company_id.partner_id.name or ''}.<br>
${object.company_id.partner_id.street or ''} <br>
${object.company_id.partner_id.state_id.code or ''}
${object.company_id.partner_id.zip or ''}
${object.company_id.partner_id.city or ''}<br>
${object.company_id.partner_id.country_id.name or ''}.<br><br>
Represented by <b>${object.company_contact_id.name or ''}.</b>
</p>
<p></p>
<h3>Partner Information</h3>
<p>
${object.partner_id.name or ''}.<br>
${object.partner_id.street or ''} <br>
${object.partner_id.state_id.code or ''}
${object.partner_id.zip or ''} ${object.partner_id.city or ''}<br>
${object.partner_id.country_id.name or ''}.<br><br>
Represented by <b>${object.partner_contact_id.name or ''}.</b>
</p>
"""
return deftext
parties = fields.Html(
string="Parties",
track_visibility="onchange",
default=_get_default_parties,
help="Parties of the agreement")
dynamic_parties = fields.Html(
compute="_compute_dynamic_parties",
string="Dynamic Parties",
help="Compute dynamic parties")
agreement_type_id = fields.Many2one(
track_visibility="onchange",
)
agreement_subtype_id = fields.Many2one(
"agreement.subtype",
string="Agreement Sub-type",
track_visibility="onchange",
help="Select the sub-type of this agreement. Sub-Types are related to "
"agreement types.")
product_ids = fields.Many2many(
"product.template", string="Products & Services")
assigned_user_id = fields.Many2one(
"res.users",
string="Assigned To",
track_visibility="onchange",
help="Select the user who manages this agreement.")
company_signed_user_id = fields.Many2one(
"res.users",
string="Signed By",
track_visibility="onchange",
help="The user at our company who authorized/signed the agreement or "
"contract.")
partner_signed_user_id = fields.Many2one(
"res.partner",
string="Signed By (Partner)",
track_visibility="onchange",
help="Contact on the account that signed the agreement/contract.")
parent_agreement_id = fields.Many2one(
"agreement",
string="Parent Agreement",
help="Link this agreement to a parent agreement. For example if this "
"agreement is an amendment to another agreement. This list will "
"only show other agreements related to the same account.")
renewal_type_id = fields.Many2one(
"agreement.renewaltype",
string="Renewal Type",
track_visibility="onchange",
help="Describes what happens after the contract expires.")
recital_ids = fields.One2many(
"agreement.recital", "agreement_id", string="Recitals", copy=True)
sections_ids = fields.One2many(
"agreement.section", "agreement_id", string="Sections", copy=True)
clauses_ids = fields.One2many(
"agreement.clause", "agreement_id", string="Clauses")
appendix_ids = fields.One2many(
"agreement.appendix", "agreement_id", string="Appendices", copy=True)
previous_version_agreements_ids = fields.One2many(
"agreement",
"parent_agreement_id",
string="Previous Versions",
copy=False,
domain=[("active", "=", False)])
child_agreements_ids = fields.One2many(
"agreement",
"parent_agreement_id",
string="Child Agreements",
copy=False,
domain=[("active", "=", True)])
line_ids = fields.One2many(
"agreement.line",
"agreement_id",
string="Products/Services",
copy=False)
state = fields.Selection(
[("draft", "Draft"), ("active", "Active"), ("inactive", "Inactive")],
default="draft",
track_visibility="always")
notification_address_id = fields.Many2one(
"res.partner",
string="Notification Address",
help="The address to send notificaitons to, if different from "
"customer address.(Address Type = Other)")
signed_contract_filename = fields.Char(string="Filename")
signed_contract = fields.Binary(
string="Signed Document", track_visibility="always")
# Dynamic field editor
field_domain = fields.Char(string='Field Expression',
default='[["active", "=", True]]')
default_value = fields.Char(
string="Default Value",
help="Optional value to use if the target field is empty.")
copyvalue = fields.Char(
string="Placeholder Expression",
help="""Final placeholder expression, to be copy-pasted in the desired
template field.""")
@api.onchange("field_domain", "default_value")
def onchange_copyvalue(self):
self.copyvalue = False
if self.field_domain:
string_list = self.field_domain.split(",")
if string_list:
field_domain = string_list[0][3:-1]
self.copyvalue = "${{object.{} or {}}}".format(
field_domain,
self.default_value or "''")
# compute the dynamic content for mako expression
@api.multi
def _compute_dynamic_description(self):
MailTemplates = self.env["mail.template"]
for agreement in self:
lang = agreement.partner_id.lang or "en_US"
description = MailTemplates.with_context(
lang=lang
)._render_template(
agreement.description, "agreement", agreement.id
)
agreement.dynamic_description = description
@api.multi
def _compute_dynamic_parties(self):
MailTemplates = self.env["mail.template"]
for agreement in self:
lang = agreement.partner_id.lang or "en_US"
parties = MailTemplates.with_context(
lang=lang
)._render_template(
agreement.parties, "agreement", agreement.id
)
agreement.dynamic_parties = parties
@api.multi
def _compute_dynamic_special_terms(self):
MailTemplates = self.env["mail.template"]
for agreement in self:
lang = agreement.partner_id.lang or "en_US"
special_terms = MailTemplates.with_context(
lang=lang
)._render_template(
agreement.special_terms, "agreement", agreement.id
)
agreement.dynamic_special_terms = special_terms
# Used for Kanban grouped_by view
@api.model
def _read_group_stage_ids(self, stages, domain, order):
stage_ids = self.env["agreement.stage"].search(
[('stage_type', '=', 'agreement')])
return stage_ids
stage_id = fields.Many2one(
"agreement.stage",
string="Stage",
group_expand="_read_group_stage_ids",
help="Select the current stage of the agreement.",
track_visibility="onchange",
index=True)
# Create New Version Button
@api.multi
def create_new_version(self, vals):
for rec in self:
if not rec.state == "draft":
# Make sure status is draft
rec.state = "draft"
default_vals = {
"name": "{} - OLD VERSION".format(rec.name),
"active": False,
"parent_agreement_id": rec.id,
}
# Make a current copy and mark it as old
rec.copy(default=default_vals)
# Increment the Version
rec.version = rec.version + 1
# Reset revision to 0 since it's a new version
vals["revision"] = 0
return super(Agreement, self).write(vals)
def create_new_agreement(self):
default_vals = {
"name": "NEW",
"active": True,
"version": 1,
"revision": 0,
"state": "draft",
"stage_id": self.env.ref("agreement_legal.agreement_stage_new").id,
}
res = self.copy(default=default_vals)
res.sections_ids.mapped('clauses_ids').write({'agreement_id': res.id})
return {
"res_model": "agreement",
"type": "ir.actions.act_window",
"view_mode": "form",
"view_type": "form",
"res_id": res.id,
}
@api.model
def create(self, vals):
if vals.get("code", _("New")) == _("New"):
vals["code"] = self.env["ir.sequence"].next_by_code(
"agreement"
) or _("New")
if not vals.get('stage_id'):
vals["stage_id"] = \
self.env.ref("agreement_legal.agreement_stage_new").id
return super(Agreement, self).create(vals)
# Increments the revision on each save action
@api.multi
def write(self, vals):
res = True
for rec in self:
vals["revision"] = rec.revision + 1
res = super(Agreement, rec).write(vals)
return res
| agpl-3.0 | -390,334,634,178,866,200 | 37.429333 | 79 | 0.595448 | false | 4.173472 | false | false | false |
feikname/spades-server | pyspades/gamemodes.py | 1 | 2853 | from pyspades import contained as loaders
from pyspades.collision import vector_collision, collision_3d
from pyspades.constants import TC_CAPTURE_DISTANCE
ctf_data = loaders.CTFState()
tc_data = loaders.TCState()
class IntelBasedGamemode:
name = "ctf"
def __init__(self, protocol):
self.protocol = protocol
self.green_flag = protocol.green_team.flag
self.blue_flag = protocol.blue_team.flag
self.state_loader = loaders.CTFState()
self.drop_intel_loader = loaders.IntelDrop()
self.drop_pickup_loader = loaders.IntelPickup()
self.drop_capture_loader = loaders.IntelCapture()
def on_position_update(self, player):
target_flag = self.get_target_flag(player)
if vector_collision(player.world_object.position,
player.team.base):
if target_flag.player is self:
player.capture_flag()
player.check_refill()
if target_flag.player is None and vector_collision(
player.position, target_flag):
player.take_flag()
def get_state_packet(self):
return
def on_player_reset(self, player):
flag = self.get_player_flag(player)
if flag is None:
return
position = player.position
x = int(position.x)
y = int(position.y)
z = max(0, int(position.z))
z = self.protocol.map.get_z(x, y, z)
flag.set(x, y, z)
flag.player = None
intel_drop = loaders.IntelDrop()
intel_drop.player_id = player.player_id
intel_drop.x = flag.x
intel_drop.y = flag.y
intel_drop.z = flag.z
self.protocol.broadcast_contained(intel_drop, save=True)
player.on_flag_drop()
def get_player_flag(self, player):
for flag in (self.blue_flag, self.green_flag):
if flag.player is self:
return flag
return None
def get_target_flag(self, connection):
return connection.team.other_flag
class TerritoryBasedGamemode(object):
name = "tc"
def __init__(self, protocol):
self.protocol = protocol
self.state_loader = loaders.TCState()
def get_state_packet(self):
return
def on_position_update(self, connection):
for entity in self.protocol.entities:
collides = vector_collision(
entity, connection.world_object.position, TC_CAPTURE_DISTANCE)
if self in entity.players:
if not collides:
entity.remove_player(self)
else:
if collides:
entity.add_player(self)
if collides and vector_collision(entity,
connection.world_object.position):
connection.check_refill()
| gpl-3.0 | 6,317,517,780,846,743,000 | 30.7 | 79 | 0.590957 | false | 3.951524 | false | false | false |
estaban/pyload | module/plugins/crypter/TnyCz.py | 1 | 1303 | # -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
from module.plugins.internal.SimpleCrypter import SimpleCrypter
import re
class TnyCz(SimpleCrypter):
__name__ = "TnyCz"
__version__ = "0.01"
__type__ = "crypter"
__pattern__ = r'http://(?:www\.)?tny\.cz/\w+'
__description__ = """Tny.cz decrypter plugin"""
__author_name__ = "Walter Purcaro"
__author_mail__ = "[email protected]"
TITLE_PATTERN = r'<title>(?P<title>.+) - .+</title>'
def getLinks(self):
m = re.search(r'<a id=\'save_paste\' href="(.+save\.php\?hash=.+)">', self.html)
return re.findall(".+", self.load(m.group(1), decode=True)) if m else None
| gpl-3.0 | -5,228,202,936,129,360,000 | 32.410256 | 88 | 0.655411 | false | 3.512129 | false | false | false |
CERNatschool/particle-rate-plotter | process-frames.py | 1 | 5413 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
CERN@school - Processing Frames
See the README.md file for more information.
"""
#...for the operating stuff.
import os
#...for the file processing.
import glob
#...for parsing the arguments.
import argparse
#...for the logging.
import logging as lg
#...for file manipulation.
from shutil import rmtree
# Import the JSON library.
import json
#...for processing the datasets.
from cernatschool.dataset import Dataset
#...for making time.
from cernatschool.handlers import make_time_dir
#...for making the frame and clusters images.
from visualisation.visualisation import makeFrameImage
if __name__ == "__main__":
print("*")
print("*======================================*")
print("* CERN@school - local frame processing *")
print("*======================================*")
# Get the datafile path from the command line.
parser = argparse.ArgumentParser()
parser.add_argument("inputPath", help="Path to the input dataset.")
parser.add_argument("outputPath", help="The path for the output files.")
parser.add_argument("-v", "--verbose", help="Increase output verbosity", action="store_true")
args = parser.parse_args()
## The path to the data file.
datapath = args.inputPath
#
# Check if the input directory exists. If it doesn't, quit.
if not os.path.isdir(datapath):
raise IOError("* ERROR: '%s' input directory does not exist!" % (datapath))
## The output path.
outputpath = args.outputPath
# Check if the output directory exists. If it doesn't, quit.
if not os.path.isdir(outputpath):
raise IOError("* ERROR: '%s' output directory does not exist!" % (outputpath))
# Set the logging level.
if args.verbose:
level=lg.DEBUG
else:
level=lg.INFO
# Configure the logging.
lg.basicConfig(filename = outputpath + '/log_process-frames.log', filemode='w', level=level)
print("*")
print("* Input path : '%s'" % (datapath))
print("* Output path : '%s'" % (outputpath))
print("*")
# Set up the directories
#------------------------
# Create the subdirectories.
## The path to the frame images.
frame_output_path = os.path.join(outputpath, "PNG")
#
if os.path.isdir(frame_output_path):
rmtree(frame_output_path)
lg.info(" * Removing directory '%s'..." % (frame_output_path))
os.mkdir(frame_output_path)
lg.info(" * Creating directory '%s'..." % (frame_output_path))
lg.info("")
## The path to the dataset.
dataset_path = os.path.join(datapath, "RAW/ASCIIxyC")
## The dataset to process.
ds = Dataset(dataset_path)
# Get the metadata from the JSON.
## The frame metadata.
fmd = None
#
with open(os.path.join(datapath, "geo.json"), "r") as fmdf:
fmd = json.load(fmdf, fmd)
#
## Latitude of the dataset [deg.].
lat = fmd['lat'] # [deg.]
#
## Longitude of the dataset [deg.].
lon = fmd['lon'] # [deg.]
#
## Altitude of the dataset [m].
alt = fmd['alt'] # [m]
## The pixel mask.
pixel_mask = {}
with open(os.path.join(datapath, "masked_pixels.txt"), "r") as mpf:
rows = mpf.readlines()
for row in rows:
vals = [int(val) for val in row.strip().split("\t")]
x = vals[0]; y = vals[1]; X = (256*y) + x; C = 1
pixel_mask[X] = C
## The frames from the dataset.
frames = ds.getFrames((lat, lon, alt), pixelmask = pixel_mask)
lg.info("* Found %d datafiles." % (len(frames)))
## A list of frames.
mds = []
# Loop over the frames and upload them to the DFC.
for f in frames:
## The basename for the data frame, based on frame information.
bn = "%s_%s" % (f.getChipId(), make_time_dir(f.getStartTimeSec()))
#bn = "%s_%d-%06d" % (f.getChipId(), f.getStartTimeSec(), f.getStartTimeSubSec())
# Create the frame image.
makeFrameImage(bn, f.getPixelMap(), frame_output_path, f.getPixelMask())
# Create the metadata dictionary for the frame.
metadata = {
"id" : bn,
#
"chipid" : f.getChipId(),
"hv" : f.getBiasVoltage(),
"ikrum" : f.getIKrum(),
#
"lat" : f.getLatitude(),
"lon" : f.getLongitude(),
"alt" : f.getAltitude(),
#
"start_time" : f.getStartTimeSec(),
"end_time" : f.getEndTimeSec(),
"acqtime" : f.getAcqTime(),
#
"n_pixel" : f.getNumberOfUnmaskedPixels(),
"occ" : f.getOccupancy(),
"occ_pc" : f.getOccupancyPc(),
#
"n_kluster" : f.getNumberOfKlusters(),
"n_gamma" : f.getNumberOfGammas(),
"n_non_gamma" : f.getNumberOfNonGammas(),
#
"ismc" : int(f.isMC())
}
# Add the frame metadata to the list of frames.
mds.append(metadata)
# Write out the frame information to a JSON file.
# We will use this later to make the frame plots,
# rather than processing the whole frame set again.
#
with open(os.path.join(outputpath, "frames.json"), "w") as jf:
json.dump(mds, jf)
| mit | -2,368,854,573,512,871,400 | 28.418478 | 97 | 0.558286 | false | 3.59429 | false | false | false |
Ledoux/ShareYourSystem | Pythonlogy/ShareYourSystem/Standards/Classors/Switcher/__init__.py | 2 | 10216 | # -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
The Switcher
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Standards.Classors.Watcher"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Tester"
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
import operator
import copy
from ShareYourSystem.Standards.Classors import Doer,Observer
#</ImportSpecificModules>
#<DefineFunctions>
def setSwitch(
_InstanceVariable,
_DoMethodVariable=None,
_DoerClassVariable=None,
_HookVariable=None
):
#Debug
'''
print('l 31 setSwitch')
print('_DoerVariable is ',_DoerVariable)
print('_DoVariable is ',_DoVariable)
print('_HookVariable is ',_HookVariable)
#print('_InstanceVariable.__class__.NameStr is ',_InstanceVariable.__class__.NameStr)
print('')
'''
#/#################/#
# Adapt the shape of the do method str to switch
#
#Check
if type(_DoMethodVariable)!=list:
#Check
if _DoMethodVariable==None:
#/#################/#
# Give all the do method str
#
#alias
#DoMethodStrsList=_InstanceVariable.DoMethodStrsList
#/#################/#
# Give just the last DoMethodStr
#
#Check
if _InstanceVariable.__class__.DoMethodStr in _InstanceVariable.__class__.SwitchMethodDict:
#listify
DoMethodStrsList=[_InstanceVariable.__class__.DoMethodStr]
else:
#listify
DoMethodStrsList=[]
else:
#listify
DoMethodStrsList=[_DoMethodVariable]
else:
#just alias
DoMethodStrsList=_DoMethodVariable
#/#################/#
# Adapt the shape of the mro doer to switch
#
#get
DoerClassesList=SYS.GetList(_DoerClassVariable)
#Debug
'''
print('l 94 Switcher')
print('_DoerClassVariable is')
print(_DoerClassVariable)
print('DoerClassesList is')
print(DoerClassesList)
print('')
'''
#Check
if _DoerClassVariable==None:
#/#################/#
# by default this is all the mro doer that have all the switch do method
# so do the intersection
#Check
if len(DoMethodStrsList)>0:
#intersection
DoerClassesList=list(
set.intersection(*
map(
lambda __DoMethodStr:
set(_InstanceVariable.__class__.SwitchMethodDict[__DoMethodStr]),
DoMethodStrsList
)
)
)
else:
#init
DoerClassesList=[]
#/#################/#
# Adapt the shape of the hook strs
#
#Check
if type(_HookVariable)!=list:
if _HookVariable==None:
HookStrsList=['Before','After']
else:
HookStrsList=[_HookVariable]
else:
HookStrsList=_HookVariable
#/#################/#
# Now map the switch
#
#Debug
'''
print('l 139 Switcher')
#print('_InstanceVariable is ')
#print(_InstanceVariable)
print('DoMethodStrsList is')
print(DoMethodStrsList)
print('DoerClassesList is ')
print(DoerClassesList)
print('HookStrsList is ')
print(HookStrsList)
print('')
'''
#map
map(
lambda __HookStr:
map(
lambda __DoerClass:
map(
lambda __DoMethodStr:
_InstanceVariable.__setattr__(
'Watch'+__HookStr+__DoMethodStr[0].upper(
)+__DoMethodStr[1:]+'With'+__DoerClass.NameStr+'Bool',
False
),
DoMethodStrsList,
),
DoerClassesList
),
HookStrsList
)
#Debug
'''
print('l 170 Switcher')
print('End of setSwitch')
print('')
'''
#return
return _InstanceVariable
def switch(_InstanceVariable,*_LiargVariablesList,**_KwargVariablesDict):
#Debug
'''
print('l 196 Switcher')
print('In the switch function ')
print('_KwargVariablesDict is ')
print(_KwargVariablesDict)
print('')
'''
"""
#alias
FuncDict=switch.__dict__
#Debug
'''
print('l 52')
print('In the switch function ')
print('FuncDict is ')
print(FuncDict)
print('')
'''
"""
#Check
if hasattr(_InstanceVariable,_KwargVariablesDict['WatchBeforeDoBoolKeyStr']):
#Debug
'''
print('Switcher l 201')
print('Check for a WatchBeforeDoBoolKeyStr')
print("_KwargVariablesDict['WatchBeforeDoBoolKeyStr'] is ")
print(_KwargVariablesDict['WatchBeforeDoBoolKeyStr'])
print('')
'''
#get
WatchDoBool=getattr(
_InstanceVariable,
_KwargVariablesDict['WatchBeforeDoBoolKeyStr']
)
#Debug
'''
print('Switcher l 236')
print('WatchDoBool is')
print(WatchDoBool)
'''
#Switch
if WatchDoBool:
return _InstanceVariable
#get the wrapped method
WrapUnboundMethod=getattr(
getattr(
SYS,
_KwargVariablesDict['BindDoClassStr']
),
_KwargVariablesDict['BindObserveWrapMethodStr']
)
#del
map(
lambda __KeyStr:
_KwargVariablesDict.__delitem__(__KeyStr),
[
'BindObserveWrapMethodStr',
'BindDoClassStr',
'WatchBeforeDoBoolKeyStr'
]
)
#Call
return WrapUnboundMethod(
_InstanceVariable,
*_LiargVariablesList,
**_KwargVariablesDict
)
def getSwitch(_InstanceVariable,_MethodVariable=None):
#Check
if _MethodVariable==None:
SwitchItemTuplesList=_InstanceVariable.SwitchMethodDict.items()
elif type(_MethodVariable) in [list,tuple]:
SwitchItemTuplesList=map(
lambda __MethodStr:
(
__MethodStr,
_InstanceVariable.SwitchMethodDict[__MethodStr]
),
_MethodVariable
)
else:
SwitchItemTuplesList=[
(
_MethodVariable,
_InstanceVariable.SwitchMethodDict[_MethodVariable]
)
]
#Debug
'''
print('getSwitch l 266')
print('_MethodVariable is ')
print(_MethodVariable)
print('SwitchItemTuplesList is ')
print(SwitchItemTuplesList)
print('')
'''
#return
WatchKeyStrsList=SYS.flat(
SYS.flat(
map(
lambda __SwitchItemTuple:
map(
lambda __ClassStr:
map(
lambda __HookStr:
'Watch'+__HookStr+SYS.getUpperWordStr(
__SwitchItemTuple[0]
)+'With'+SYS.getNameStrWithClassStr(
__ClassStr
)+'Bool',
['Before','After']
),
map(lambda __Class:__Class.__name__,__SwitchItemTuple[1])
),
SwitchItemTuplesList
)
)
)
#Debug
'''
print('getSwitch l 300')
print('WatchKeyStrsList is ')
print(WatchKeyStrsList)
print('WatchKeyStrsList is ')
print(WatchKeyStrsList)
print('')
'''
#return
return dict(
zip(
WatchKeyStrsList,
map(
lambda __WatchKeyStr:
getattr(_InstanceVariable,__WatchKeyStr),
WatchKeyStrsList
)
)
)
#</DefineFunctions>
#<DefineClass>
@DecorationClass()
class SwitcherClass(BaseClass):
#Definition
RepresentingKeyStrsList=[
'SwitchingIsBool',
'SwitchingWrapMethodStr'
]
def default_init(self,
_SwitchingIsBool=False,
_SwitchingWrapMethodStr="",
**_KwargVariablesDict
):
#Call the parent init method
BaseClass.__init__(self,**_KwargVariablesDict)
def __call__(self,_Class):
#Call the parent method
Observer.ObserverClass.__bases__[0].__call__(self,_Class)
#reset
self.switch()
#Return
return _Class
def do_switch(self):
#Check
if self.SwitchingIsBool:
#alias
SwitchedClass=self.DoClass
#Debug
'''
print('l 195 Switcher')
print('self.SwitchingWrapMethodStr is '+self.SwitchingWrapMethodStr)
print('')
'''
#watch first
self.watch(
True,
**{'ObservingWrapMethodStr':self.SwitchingWrapMethodStr}
)
#Debug
'''
print('l 204 Switcher')
print('self.WatchedDecorationMethodStr is ',self.WatchedDecorationMethodStr)
print('')
'''
#first bind
self.bind(
True,
switch,
"",
switch.__name__,
[('WatchBeforeDoBoolKeyStr',self.WatchedBeforeDoBoolKeyStr)],
**{'ObservingWrapMethodStr':self.WatchedDecorationMethodStr}
)
#Define
SwitchedDecorationUnboundMethod=getattr(
SwitchedClass,
self.BindedDecorationMethodStr
)
#Now make the amalgam
setattr(
SwitchedClass,
self.SwitchingWrapMethodStr,
SwitchedDecorationUnboundMethod
)
#/##################/#
# Set maybe for the first time
# the setSwitch and the getSwitch
#Check
if hasattr(SwitchedClass,'setSwitch')==False:
#set
setattr(
SwitchedClass,
setSwitch.__name__,
setSwitch
)
#get the unbound
setSwitchUnboundMethod=getattr(
SwitchedClass,
setSwitch.__name__
)
#add in the inspect
SwitchedClass.InspectMethodDict[setSwitch.__name__]=setSwitchUnboundMethod
SwitchedClass.InspectInspectDict[setSwitch.__name__]=SYS.InspectDict(
setSwitchUnboundMethod
)
#set
self.setMethod(
getSwitch.__name__,
getSwitch
)
#/##################/#
# Init the SwitchMethodDict
#
#Check
if hasattr(SwitchedClass,'SwitchMethodDict')==False:
#Debug
'''
print('Switcher l 345')
print('SwitchedClass is ')
print(SwitchedClass)
print('we init a SwitchMethodDict')
print('')
'''
#Check
if hasattr(SwitchedClass.__bases__[0],'SwitchMethodDict'):
#Debug
print('Switcher l 488')
print('SwitchedClass is ')
print(SwitchedClass)
print('SwitchedClass.__bases__[0] is ')
print(SwitchedClass.__bases__[0])
print('')
#copy
SwitchedClass.SwitchMethodDict=copy.copy(
SwitchedClass.__bases__[0].SwitchMethodDict
)
else:
#init
SwitchedClass.SwitchMethodDict={
self.SwitchingWrapMethodStr:[SwitchedClass]
}
else:
#/##################/#
# add
#
#Debug
'''
print('Switcher l 514')
print('SwitchedClass is ')
print(SwitchedClass)
print('there is already a SwitchMethodDict')
print('self.SwitchingWrapMethodStr is ')
print(self.SwitchingWrapMethodStr)
print('SwitchedClass.SwitchMethodDict is ')
print(SwitchedClass.SwitchMethodDict)
print('')
'''
#copy
SwitchedClass.SwitchMethodDict=copy.copy(
SwitchedClass.SwitchMethodDict
)
#update
if self.SwitchingWrapMethodStr in self.DoClass.SwitchMethodDict:
SwitchedClass.SwitchMethodDict[
self.SwitchingWrapMethodStr
].append(SwitchedClass)
else:
SwitchedClass.SwitchMethodDict[
self.SwitchingWrapMethodStr
]=[SwitchedClass]
#Add to the KeyStrsList
SwitchedClass.KeyStrsList+=[
'SwitchMethodDict'
]
#</DefineClass>
| mit | -2,826,561,491,702,759,400 | 17.608379 | 94 | 0.649178 | false | 3.088271 | false | false | false |
citrix-openstack-build/heat | heat/openstack/common/lockutils.py | 6 | 9338 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import functools
import os
import time
import weakref
from eventlet import semaphore
from oslo.config import cfg
from heat.openstack.common import fileutils
from heat.openstack.common.gettextutils import _ # noqa
from heat.openstack.common import local
from heat.openstack.common import log as logging
LOG = logging.getLogger(__name__)
util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
cfg.StrOpt('lock_path',
help=('Directory to use for lock files.'))
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
def set_defaults(lock_path):
cfg.set_defaults(util_opts, lock_path=lock_path)
class _InterProcessLock(object):
"""Lock implementation which allows multiple locks, working around
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
not require any cleanup. Since the lock is always held on a file
descriptor rather than outside of the process, the lock gets dropped
automatically if the process crashes, even if __exit__ is not executed.
There are no guarantees regarding usage by multiple green threads in a
single process here. This lock works only between processes. Exclusive
access between local threads should be achieved using the semaphores
in the @synchronized decorator.
Note these locks are released when the descriptor is closed, so it's not
safe to close the file descriptor while another green thread holds the
lock. Just opening and closing the lock file can break synchronisation,
so lock files must be accessed only using this abstraction.
"""
def __init__(self, name):
self.lockfile = None
self.fname = name
def __enter__(self):
self.lockfile = open(self.fname, 'w')
while True:
try:
# Using non-blocking locks since green threads are not
# patched to deal with blocking locking calls.
# Also upon reading the MSDN docs for locking(), it seems
# to have a laughable 10 attempts "blocking" mechanism.
self.trylock()
return self
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
# external locks synchronise things like iptables
# updates - give it some time to prevent busy spinning
time.sleep(0.01)
else:
raise
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.unlock()
self.lockfile.close()
except IOError:
LOG.exception(_("Could not release the acquired lock `%s`"),
self.fname)
def trylock(self):
raise NotImplementedError()
def unlock(self):
raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
def trylock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
def unlock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
def trylock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
def unlock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
if os.name == 'nt':
import msvcrt
InterProcessLock = _WindowsLock
else:
import fcntl
InterProcessLock = _PosixLock
_semaphores = weakref.WeakValueDictionary()
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
"""Context based lock
This function yields a `semaphore.Semaphore` instance unless external is
True, in which case, it'll yield an InterProcessLock instance.
:param lock_file_prefix: The lock_file_prefix argument is used to provide
lock files on disk with a meaningful prefix.
:param external: The external keyword argument denotes whether this lock
should work across multiple processes. This means that if two different
workers both run a a method decorated with @synchronized('mylock',
external=True), only one of them will execute at a time.
:param lock_path: The lock_path keyword argument is used to specify a
special location for external lock files to live. If nothing is set, then
CONF.lock_path is used as a default.
"""
# NOTE(soren): If we ever go natively threaded, this will be racy.
# See http://stackoverflow.com/questions/5390569/dyn
# amically-allocating-and-destroying-mutexes
sem = _semaphores.get(name, semaphore.Semaphore())
if name not in _semaphores:
# this check is not racy - we're already holding ref locally
# so GC won't remove the item and there was no IO switch
# (only valid in greenthreads)
_semaphores[name] = sem
with sem:
LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})
# NOTE(mikal): I know this looks odd
if not hasattr(local.strong_store, 'locks_held'):
local.strong_store.locks_held = []
local.strong_store.locks_held.append(name)
try:
if external and not CONF.disable_process_locking:
LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
{'lock': name})
# We need a copy of lock_path because it is non-local
local_lock_path = lock_path or CONF.lock_path
if not local_lock_path:
raise cfg.RequiredOptError('lock_path')
if not os.path.exists(local_lock_path):
fileutils.ensure_tree(local_lock_path)
LOG.info(_('Created lock path: %s'), local_lock_path)
def add_prefix(name, prefix):
if not prefix:
return name
sep = '' if prefix.endswith('-') else '-'
return '%s%s%s' % (prefix, sep, name)
# NOTE(mikal): the lock name cannot contain directory
# separators
lock_file_name = add_prefix(name.replace(os.sep, '_'),
lock_file_prefix)
lock_file_path = os.path.join(local_lock_path, lock_file_name)
try:
lock = InterProcessLock(lock_file_path)
with lock as lock:
LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
{'lock': name, 'path': lock_file_path})
yield lock
finally:
LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
{'lock': name, 'path': lock_file_path})
else:
yield sem
finally:
local.strong_store.locks_held.remove(name)
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one thread will execute the foo method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
with lock(name, lock_file_prefix, external, lock_path):
LOG.debug(_('Got semaphore / lock "%(function)s"'),
{'function': f.__name__})
return f(*args, **kwargs)
LOG.debug(_('Semaphore / lock released "%(function)s"'),
{'function': f.__name__})
return inner
return wrap
def synchronized_with_prefix(lock_file_prefix):
"""Partial object generator for the synchronization decorator.
Redefine @synchronized in each project like so::
(in nova/utils.py)
from nova.openstack.common import lockutils
synchronized = lockutils.synchronized_with_prefix('nova-')
(in nova/foo.py)
from nova import utils
@utils.synchronized('mylock')
def bar(self, *args):
...
The lock_file_prefix argument is used to provide lock files on disk with a
meaningful prefix.
"""
return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
| apache-2.0 | 4,315,550,123,892,458,000 | 32.833333 | 78 | 0.612872 | false | 4.301244 | false | false | false |
mjhea0/flask-basic-registration | project/__init__.py | 1 | 1596 | # project/__init__.py
#################
#### imports ####
#################
import os
from flask import Flask, render_template
from flask_login import LoginManager
from flask_bcrypt import Bcrypt
from flask_mail import Mail
from flask_debugtoolbar import DebugToolbarExtension
from flask_sqlalchemy import SQLAlchemy
################
#### config ####
################
app = Flask(__name__)
app.config.from_object(os.environ['APP_SETTINGS'])
####################
#### extensions ####
####################
login_manager = LoginManager()
login_manager.init_app(app)
bcrypt = Bcrypt(app)
mail = Mail(app)
toolbar = DebugToolbarExtension(app)
db = SQLAlchemy(app)
####################
#### blueprints ####
####################
from project.main.views import main_blueprint
from project.user.views import user_blueprint
app.register_blueprint(main_blueprint)
app.register_blueprint(user_blueprint)
####################
#### flask-login ####
####################
from project.models import User
login_manager.login_view = "user.login"
login_manager.login_message_category = "danger"
@login_manager.user_loader
def load_user(user_id):
return User.query.filter(User.id == int(user_id)).first()
########################
#### error handlers ####
########################
@app.errorhandler(403)
def forbidden_page(error):
return render_template("errors/403.html"), 403
@app.errorhandler(404)
def page_not_found(error):
return render_template("errors/404.html"), 404
@app.errorhandler(500)
def server_error_page(error):
return render_template("errors/500.html"), 500
| mit | -949,184,860,191,624,200 | 19.202532 | 61 | 0.620301 | false | 3.746479 | false | false | false |
ulaskan/RegEx | regex.py | 1 | 2233 | #! /usr/bin/python
# Author: Ulas A.
# Date: 14 Nov 2015
# Python version: 2.7
# This program takes a text file input by the user in a Linux filesystem and searches for a user input string
# The search simulates Regular expressions. The program can search for text anywhere in the opened file
# the beginning of each line (^) or the end of lines $
import os, re, subprocess
### User inputs and variable definitions
TheFile = raw_input('Type in the file to search: ')
while os.path.exists(TheFile) is False:
print 'Invalid file/folder name, please try again!'
TheFile = raw_input('Type in the file/folder to search: ')
Search = raw_input('Type string to search: ')
print 'Choose an option: \n \ta - Search anywhere in the line \n \t^ - Search beginning of a line \n\t$ - Search end of a line'
option = raw_input('a/^/$: ')
while option not in ('a', '^', '$'):
print 'Invalid option!'
option = raw_input('a/^/$: ')
### Check if the file is binary
def NonBinary(path):
return (re.search(r':.* text', subprocess.Popen(["file", '-L', path], stdout=subprocess.PIPE).stdout.read()) is not None)
### Search anywhere option
if option == "a":
if NonBinary(TheFile):
with open(TheFile, 'rt') as txt:
for line in txt:
if Search in line:
print TheFile, ':', line,
if not os.access(TheFile, os.R_OK):
print TheFile, ': Permission denied'
### Search the beginning of a line option
elif option == "^":
if NonBinary(TheFile):
with open(TheFile, 'rt') as txt:
for line in txt:
CurrentLine = line
regex = re.match(Search, CurrentLine)
if regex:
print TheFile, ':', line,
if not os.access(TheFile, os.R_OK):
print TheFile, ': Permission denied'
### Search the end of a line option
elif option == "$":
if NonBinary(TheFile):
with open(TheFile, 'rt') as txt:
for line in txt:
CurrentLine = line
if re.search(Search+r'$', CurrentLine):
print TheFile, ':', line,
if not os.access(TheFile, os.R_OK):
print TheFile, ': Permission denied'
| gpl-2.0 | 409,560,282,669,412,900 | 34.444444 | 127 | 0.600537 | false | 3.72788 | false | false | false |
JackGavin13/octoprint-test-not-finished | src/octoprint/filemanager/util.py | 9 | 6227 | # coding=utf-8
from __future__ import absolute_import, division, print_function
__author__ = "Gina Häußge <[email protected]>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2015 The OctoPrint Project - Released under terms of the AGPLv3 License"
import io
from octoprint.util import atomic_write
class AbstractFileWrapper(object):
"""
Wrapper for file representations to save to storages.
Arguments:
filename (str): The file's name
"""
def __init__(self, filename):
self.filename = filename
def save(self, path):
"""
Saves the file's content to the given absolute path.
Arguments:
path (str): The absolute path to where to save the file
"""
raise NotImplementedError()
def stream(self):
"""
Returns a Python stream object (subclass of io.IOBase) representing the file's contents.
Returns:
io.IOBase: The file's contents as a stream.
"""
raise NotImplementedError()
class DiskFileWrapper(AbstractFileWrapper):
"""
An implementation of :class:`.AbstractFileWrapper` that wraps an actual file on disk. The `save` implementations
will either copy the file to the new path (preserving file attributes) or -- if `move` is `True` (the default) --
move the file.
Arguments:
filename (str): The file's name
path (str): The file's absolute path
move (boolean): Whether to move the file upon saving (True, default) or copying.
"""
def __init__(self, filename, path, move=True):
AbstractFileWrapper.__init__(self, filename)
self.path = path
self.move = move
def save(self, path):
import shutil
if self.move:
shutil.move(self.path, path)
else:
shutil.copy2(self.path, path)
def stream(self):
return io.open(self.path, "rb")
class StreamWrapper(AbstractFileWrapper):
"""
A wrapper allowing processing of one or more consecutive streams.
Arguments:
*streams (io.IOBase): One or more streams to process one after another to save to storage.
"""
def __init__(self, filename, *streams):
if not len(streams) > 0:
raise ValueError("Need at least one stream to wrap")
AbstractFileWrapper.__init__(self, filename)
self.streams = streams
def save(self, path):
"""
Will dump the contents of all streams provided during construction into the target file, in the order they were
provided.
"""
import shutil
with atomic_write(path, "wb") as dest:
with self.stream() as source:
shutil.copyfileobj(source, dest)
def stream(self):
"""
If more than one stream was provided to the constructor, will return a :class:`.MultiStream` wrapping all
provided streams in the order they were provided, else the first and only stream is returned directly.
"""
if len(self.streams) > 1:
return MultiStream(*self.streams)
else:
return self.streams[0]
class MultiStream(io.RawIOBase):
"""
A stream implementation which when read reads from multiple streams, one after the other, basically concatenating
their contents in the order they are provided to the constructor.
Arguments:
*streams (io.IOBase): One or more streams to concatenate.
"""
def __init__(self, *streams):
io.RawIOBase.__init__(self)
self.streams = streams
self.current_stream = 0
def read(self, n=-1):
if n == 0:
return b''
if len(self.streams) == 0:
return b''
while self.current_stream < len(self.streams):
stream = self.streams[self.current_stream]
result = stream.read(n)
if result is None or len(result) != 0:
return result
else:
self.current_stream += 1
return b''
def readinto(self, b):
n = len(b)
read = self.read(n)
b[:len(read)] = read
return len(read)
def close(self):
for stream in self.streams:
try:
stream.close()
except:
pass
def readable(self, *args, **kwargs):
return True
def seekable(self, *args, **kwargs):
return False
def writable(self, *args, **kwargs):
return False
class LineProcessorStream(io.RawIOBase):
"""
While reading from this stream the provided `input_stream` is read line by line, calling the (overridable) method
:meth:`.process_line` for each read line.
Sub classes can thus modify the contents of the `input_stream` in line, while it is being read.
Arguments:
input_stream (io.IOBase): The stream to process on the fly.
"""
def __init__(self, input_stream):
io.RawIOBase.__init__(self)
self.input_stream = io.BufferedReader(input_stream)
self.leftover = None
def read(self, n=-1):
if n == 0:
return b''
result = b''
while len(result) < n or n == -1:
bytes_left = (n - len(result)) if n != -1 else -1
if self.leftover is not None:
if bytes_left != -1 and bytes_left < len(self.leftover):
result += self.leftover[:bytes_left]
self.leftover = self.leftover[bytes_left:]
break
else:
result += self.leftover
self.leftover = None
processed_line = None
while processed_line is None:
line = self.input_stream.readline()
if not line:
break
processed_line = self.process_line(line)
if processed_line is None:
break
bytes_left = (n - len(result)) if n != -1 else -1
if bytes_left != -1 and bytes_left < len(processed_line):
result += processed_line[:bytes_left]
self.leftover = processed_line[bytes_left:]
break
else:
result += processed_line
return result
def readinto(self, b):
n = len(b)
read = self.read(n)
b[:len(read)] = read
return len(read)
def process_line(self, line):
"""
Called from the `read` Method of this stream with each line read from `self.input_stream`.
By returning ``None`` the line will not be returned from the read stream, effectively being stripped from the
wrapper `input_stream`.
Arguments:
line (str): The line as read from `self.input_stream`
Returns:
str or None: The processed version of the line (might also be multiple lines), or None if the line is to be
stripped from the processed stream.
"""
return line
def close(self):
self.input_stream.close()
def readable(self, *args, **kwargs):
return True
def seekable(self, *args, **kwargs):
return False
def writable(self, *args, **kwargs):
return False
| agpl-3.0 | -5,343,517,159,775,421,000 | 24.829876 | 114 | 0.68257 | false | 3.352181 | false | false | false |
cloudera/hue | desktop/core/ext-py/openpyxl-2.6.4/openpyxl/workbook/_writer.py | 2 | 6579 | from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyxl
"""Write the workbook global settings to the archive."""
from copy import copy
from openpyxl.utils import absolute_coordinate, quote_sheetname
from openpyxl.xml.constants import (
ARC_APP,
ARC_CORE,
ARC_WORKBOOK,
PKG_REL_NS,
CUSTOMUI_NS,
ARC_ROOT_RELS,
)
from openpyxl.xml.functions import tostring, fromstring
from openpyxl.packaging.relationship import Relationship, RelationshipList
from openpyxl.workbook.defined_name import DefinedName
from openpyxl.workbook.external_reference import ExternalReference
from openpyxl.packaging.workbook import ChildSheet, WorkbookPackage, PivotCache
from openpyxl.workbook.properties import WorkbookProperties
from openpyxl.utils.datetime import CALENDAR_MAC_1904
def get_active_sheet(wb):
"""
Return the index of the active sheet.
If the sheet set to active is hidden return the next visible sheet or None
"""
visible_sheets = [idx for idx, sheet in enumerate(wb._sheets) if sheet.sheet_state == "visible"]
if not visible_sheets:
raise IndexError("At least one sheet must be visible")
idx = wb._active_sheet_index
sheet = wb.active
if sheet and sheet.sheet_state == "visible":
return idx
for idx in visible_sheets[idx:]:
wb.active = idx
return idx
return None
class WorkbookWriter:
def __init__(self, wb):
self.wb = wb
self.rels = RelationshipList()
self.package = WorkbookPackage()
self.package.workbookProtection = wb.security
self.package.calcPr = wb.calculation
def write_properties(self):
props = WorkbookProperties() # needs a mapping to the workbook for preservation
if self.wb.code_name is not None:
props.codeName = self.wb.code_name
if self.wb.excel_base_date == CALENDAR_MAC_1904:
props.date1904 = True
self.package.workbookPr = props
def write_worksheets(self):
for idx, sheet in enumerate(self.wb._sheets, 1):
sheet_node = ChildSheet(name=sheet.title, sheetId=idx, id="rId{0}".format(idx))
rel = Relationship(type=sheet._rel_type, Target=sheet.path)
self.rels.append(rel)
if not sheet.sheet_state == 'visible':
if len(self.wb._sheets) == 1:
raise ValueError("The only worksheet of a workbook cannot be hidden")
sheet_node.state = sheet.sheet_state
self.package.sheets.append(sheet_node)
def write_refs(self):
for link in self.wb._external_links:
# need to match a counter with a workbook's relations
rId = len(self.wb.rels) + 1
rel = Relationship(type=link._rel_type, Target=link.path)
self.rels.append(rel)
ext = ExternalReference(id=rel.id)
self.package.externalReferences.append(ext)
def write_names(self):
defined_names = copy(self.wb.defined_names)
# Defined names -> autoFilter
for idx, sheet in enumerate(self.wb.worksheets):
auto_filter = sheet.auto_filter.ref
if auto_filter:
name = DefinedName(name='_FilterDatabase', localSheetId=idx, hidden=True)
name.value = u"{0}!{1}".format(quote_sheetname(sheet.title),
absolute_coordinate(auto_filter)
)
defined_names.append(name)
# print titles
if sheet.print_titles:
name = DefinedName(name="Print_Titles", localSheetId=idx)
name.value = ",".join([u"{0}!{1}".format(quote_sheetname(sheet.title), r)
for r in sheet.print_titles.split(",")])
defined_names.append(name)
# print areas
if sheet.print_area:
name = DefinedName(name="Print_Area", localSheetId=idx)
name.value = ",".join([u"{0}!{1}".format(quote_sheetname(sheet.title), r)
for r in sheet.print_area])
defined_names.append(name)
self.package.definedNames = defined_names
def write_pivots(self):
pivot_caches = set()
for pivot in self.wb._pivots:
if pivot.cache not in pivot_caches:
pivot_caches.add(pivot.cache)
c = PivotCache(cacheId=pivot.cacheId)
self.package.pivotCaches.append(c)
rel = Relationship(Type=pivot.cache.rel_type, Target=pivot.cache.path)
self.rels.append(rel)
c.id = rel.id
#self.wb._pivots = [] # reset
def write_views(self):
active = get_active_sheet(self.wb)
if self.wb.views:
self.wb.views[0].activeTab = active
self.package.bookViews = self.wb.views
def write(self):
"""Write the core workbook xml."""
self.write_properties()
self.write_worksheets()
self.write_names()
self.write_pivots()
self.write_views()
self.write_refs()
return tostring(self.package.to_tree())
def write_rels(self):
"""Write the workbook relationships xml."""
styles = Relationship(type='styles', Target='styles.xml')
self.rels.append(styles)
theme = Relationship(type='theme', Target='theme/theme1.xml')
self.rels.append(theme)
if self.wb.vba_archive:
vba = Relationship(type='', Target='vbaProject.bin')
vba.Type ='http://schemas.microsoft.com/office/2006/relationships/vbaProject'
self.rels.append(vba)
return tostring(self.rels.to_tree())
def write_root_rels(self):
"""Write the package relationships"""
rels = RelationshipList()
rel = Relationship(type="officeDocument", Target=ARC_WORKBOOK)
rels.append(rel)
rel = Relationship(Type="%s/metadata/core-properties" % PKG_REL_NS, Target=ARC_CORE)
rels.append(rel)
rel = Relationship(type="extended-properties", Target=ARC_APP)
rels.append(rel)
if self.wb.vba_archive is not None:
# See if there was a customUI relation and reuse it
xml = fromstring(self.wb.vba_archive.read(ARC_ROOT_RELS))
root_rels = RelationshipList.from_tree(xml)
for rel in root_rels.find(CUSTOMUI_NS):
rels.append(rel)
return tostring(rels.to_tree())
| apache-2.0 | 2,303,528,685,725,274,000 | 33.088083 | 100 | 0.605563 | false | 3.833916 | false | false | false |
hpparvi/PyTransit | notebooks/contamination/src/plotting.py | 1 | 5677 | # PyTransit: fast and easy exoplanet transit modelling in Python.
# Copyright (C) 2010-2019 Hannu Parviainen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import matplotlib as mpl
import matplotlib.pyplot as pl
import seaborn as sb
from matplotlib.gridspec import GridSpec
from numpy import sqrt, percentile, ceil, linspace, concatenate, histogram
color = sb.color_palette()[0]
color_rgb = mpl.colors.colorConverter.to_rgb(color)
colors = [sb.utils.set_hls_values(color_rgb, l=l) for l in linspace(1, 0, 12)]
cmap = sb.blend_palette(colors, as_cmap=True)
color = sb.color_palette()[1]
color_rgb = mpl.colors.colorConverter.to_rgb(color)
colors = [sb.utils.set_hls_values(color_rgb, l=l) for l in linspace(1, 0, 12)]
cmap2 = sb.blend_palette(colors, as_cmap=True)
c_ob = "#002147" # Oxford blue
c_bo = "#CC5500" # Burnt orange
def marginal_radius_ratio_plot(df, bins: int = 40, klim: tuple = None, figsize: tuple = (6, 5)):
if klim is None:
klim = percentile(concatenate([df.k_app.values, df.k_true.values]), [0.0, 99.9])
v1, e = histogram(df.k_app, range=klim, bins=bins, density=True)
v2, e = histogram(df.k_true, range=klim, bins=bins, density=True)
fig, ax = pl.subplots(figsize=figsize, constrained_layout=True)
ax.fill_betweenx(e[:-1], -10 - v1, -10, step='post', linewidth=1, edgecolor='k')
ax.fill_betweenx(e[:-1], 10 + v2, 10, step='post', linewidth=1, edgecolor='k')
ax.text(-0.5 * v1.max(), klim[1], 'Apparent radius ratio', ha='center', va='top')
ax.text(0.5 * v2.max(), klim[1], 'True radius ratio', ha='center', va='top')
pl.setp(ax, xlabel='Posterior density', ylabel='Radius ratio', xticks=[], ylim=klim)
sb.despine(fig, offset=5)
return fig
def _jplot(hte, cte, cnr, imp, rho, fw=10, nb=30, gs=25, ylabel='Contamination in $i\'$', **kwargs):
htelim = kwargs.get('htelim', (2000, 8000))
ctelim = kwargs.get('ctelim', (4000, 12000))
blim = kwargs.get('blim', (0, 1))
rlim = kwargs.get('rlim', (0, 15))
clim = kwargs.get('clim', (0, 1))
fig = pl.figure(figsize=(fw, fw / 4))
gs_tt = GridSpec(2, 1, bottom=0.2, top=1, left=0.1, right=0.3, hspace=0, wspace=0, height_ratios=[0.15, 0.85], figure=fig)
gs_ct = GridSpec(2, 5, bottom=0.2, top=1, left=0.38, right=1, hspace=0.05, wspace=0.05,
height_ratios=[0.15, 0.85],
width_ratios=[1, 1, 1, 1, 0.2], figure=fig)
ax_tt = pl.subplot(gs_tt[1, 0])
ax_chj = pl.subplot(gs_ct[1, 0])
ax_ccj = pl.subplot(gs_ct[1, 1])
ax_cbj = pl.subplot(gs_ct[1, 2])
ax_crj = pl.subplot(gs_ct[1, 3])
ax_thm = pl.subplot(gs_ct[0, 0])
ax_ctm = pl.subplot(gs_ct[0, 1])
ax_bm = pl.subplot(gs_ct[0, 2])
ax_rm = pl.subplot(gs_ct[0, 3])
ax_cnm = pl.subplot(gs_ct[1, 4])
ax_tt.hexbin(hte, cte, gridsize=gs, cmap=cmap, extent=(htelim[0], htelim[1], ctelim[0], ctelim[1]))
ax_chj.hexbin(hte, cnr, gridsize=gs, cmap=cmap, extent=(htelim[0], htelim[1], clim[0], clim[1]))
ax_ccj.hexbin(cte, cnr, gridsize=gs, cmap=cmap, extent=(ctelim[0], ctelim[1], clim[0], clim[1]))
ax_cbj.hexbin(imp, cnr, gridsize=gs, cmap=cmap, extent=(blim[0], blim[1], clim[0], clim[1]))
ax_crj.hexbin(rho, cnr, gridsize=gs, cmap=cmap, extent=(rlim[0], rlim[1], clim[0], clim[1]))
ax_thm.hist(hte, bins=nb, alpha=0.5, range=htelim, histtype='stepfilled')
ax_ctm.hist(cte, bins=nb, alpha=0.5, range=ctelim, histtype='stepfilled')
ax_bm.hist(imp, bins=nb, alpha=0.5, range=blim, histtype='stepfilled')
ax_rm.hist(rho, bins=nb, alpha=0.5, range=rlim, histtype='stepfilled')
ax_cnm.hist(cnr, bins=nb, alpha=0.5, range=clim, histtype='stepfilled', orientation='horizontal')
pl.setp(ax_tt, xlabel='Host $T_\mathrm{Eff}$', ylabel='Contaminant $T_\mathrm{Eff}$')
pl.setp(ax_chj, xlabel='Host $T_\mathrm{Eff}$', ylabel=ylabel)
pl.setp(ax_ccj, xlabel='Contaminant $T_\mathrm{Eff}$')
pl.setp(ax_cbj, xlabel='Impact parameter')
pl.setp(ax_crj, xlabel='Stellar density')
pl.setp(ax_thm, xlim=ax_chj.get_xlim())
pl.setp(ax_ctm, xlim=ax_ccj.get_xlim())
pl.setp(ax_bm, xlim=ax_cbj.get_xlim())
pl.setp([ax_ccj, ax_cnm], ylim=ax_chj.get_ylim())
pl.setp([ax_chj, ax_ccj, ax_cbj, ax_crj, ax_cnm], ylim=clim)
pl.setp([ax_thm, ax_ctm, ax_cnm, ax_bm, ax_rm], yticks=[], xticks=[])
pl.setp(ax_ccj.get_yticklabels(), visible=False)
pl.setp(ax_cbj.get_yticklabels(), visible=False)
pl.setp(ax_crj.get_yticklabels(), visible=False)
[sb.despine(ax=ax, left=True, offset=0.1) for ax in [ax_thm, ax_ctm, ax_bm, ax_rm]]
[sb.despine(ax=ax) for ax in [ax_chj, ax_ccj, ax_cbj, ax_crj]]
sb.despine(ax=ax_cnm, bottom=True)
return fig, ax_tt, ax_chj, ax_cbj, ax_ccj, ax_crj
def joint_radius_ratio_plot(df, fw=10, nb=30, gs=25, **kwargs):
return _jplot(df.teff_h, df.teff_c, df.k_true, df.b, df.rho, fw, nb, gs, ylabel='True radius ratio', **kwargs)[0]
def joint_contamination_plot(df, fw=10, nb=30, gs=25, **kwargs):
return _jplot(df.teff_h, df.teff_c, df.cnt, df.b, df.rho, fw, nb, gs, **kwargs)[0]
| gpl-2.0 | -746,157,780,433,276,000 | 45.917355 | 126 | 0.648758 | false | 2.503086 | false | false | false |
tribhuvanesh/R3PI-PO | persistent_helpers.py | 1 | 3488 | #!flask/bin/python
import json
import os
import urllib2
import httplib2 as http
import requests
from creds import bigOvenAPIkey
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from flask import Flask, jsonify
recipe_blob_dct = {
1 : {
'id': 1,
'title': 'Chicken Alfredo',
'description': 'Chicken & Pasta & Cream.',
'img': 'https://c1.staticflickr.com/3/2504/3874012191_48ec021023.jpg'
},
2 : {
'id': 2,
'title': 'Lasagna',
'description': 'Garfield\'s Favorite.',
'img': 'https://upload.wikimedia.org/wikipedia/commons/6/6b/Lasagna_(1).jpg'
},
3 : {
'id': 3,
'title': 'Pizza',
'description': 'Best served cold, just like revenge.',
'img': 'https://upload.wikimedia.org/wikipedia/commons/9/95/Pizza_with_various_toppings.jpg'
}
}
def get_recipe_ids():
"""Returns a list of valid recipe IDS.
@return: a JSON object - list of recipe IDs, where recipe ID is a number
"""
"""
1. Get listing of files in the directory
2. Make array of integers with the names of the files
"""
dirListing = os.listdir("recipeJSONs/")
lst = []
tempName = []
for item in dirListing:
if ".json" in item:
tempName = os.path.splitext(item)[0]
lst.append(int(tempName))
response = json.dumps({'response' : lst})
return response
def get_recipe_short_descriptions():
"""Returns a list of valid recipe IDS with small descriptions.
@return: a JSON object - list of recipe IDs, where recipe ID is a number
"""
"""
1. Get listing of files in the directory
2. Make array of integers with the names of the files
"""
dirListing = os.listdir("recipeJSONs/")
lst = []
tempName = []
for item in dirListing:
if ".json" in item:
tempName = os.path.splitext(item)[0]
with open(os.path.join("recipeJSONs", item)) as f:
recipeInfo = json.load(f)
lst.append({
"RecipeID": recipeInfo["RecipeID"],
"Title": recipeInfo["Title"],
"Description": recipeInfo["Description"],
"ImageURL": recipeInfo["ImageURL"]
})
response = json.dumps({'response' : lst})
return response
#return jsonify({'list': recipe_blob_dct.keys()})
def get_recipe_info(recipe_id):
"""Information about the recipe
@type recipe_id: number
@return: a JSON object
"""
"""
1. Get list of files
2. If recipe_id present in list, get the contents of the files
3. Else, make API call to get the JSON data from bigOven
4. jsonify the data and return
"""
dct = json.loads(get_recipe_ids())
lst = dct['response']
if recipe_id not in lst:
url_path = "http://api.bigoven.com/recipe/" + str(recipe_id) + "?api_key=" + bigOvenAPIkey
headers = {'content-type': 'application/json'}
req = requests.get(url_path, headers=headers)
content = req.content
recipe_info = json.loads(content)
else:
json_fname = "%d.json" % recipe_id
json_path = os.path.join("recipeJSONs", json_fname)
recipe_info_str = open(json_path).read()
recipe_info = json.loads(recipe_info_str)
return json.dumps({'response' : recipe_info})
def main():
print get_recipe_info(158905)
if __name__ == '__main__':
main()
| apache-2.0 | -4,177,722,889,112,337,000 | 27.357724 | 100 | 0.59547 | false | 3.577436 | false | false | false |
hrayr-artunyan/shuup | shuup/default_tax/models.py | 2 | 4052 | # This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.db import models
from django.db.models import Q
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from shuup.core.models import CustomerTaxGroup, Tax, TaxClass
from shuup.utils.patterns import Pattern, pattern_matches
class TaxRuleQuerySet(models.QuerySet):
def may_match_postal_code(self, postalcode):
null = Q(_postal_codes_min__isnull=True)
in_range = Q()
if postalcode:
in_range = Q(_postal_codes_min__lte=postalcode, _postal_codes_max__gte=postalcode)
return self.filter(null | in_range)
@python_2_unicode_compatible
class TaxRule(models.Model):
enabled = models.BooleanField(default=True, verbose_name=_('enabled'), db_index=True)
tax_classes = models.ManyToManyField(
TaxClass,
verbose_name=_("tax classes"), help_text=_(
"Tax classes of the items to be taxed"))
customer_tax_groups = models.ManyToManyField(
CustomerTaxGroup, blank=True,
verbose_name=_("customer tax groups"))
country_codes_pattern = models.CharField(
max_length=300, blank=True,
verbose_name=_("country codes pattern"))
region_codes_pattern = models.CharField(
max_length=500, blank=True,
verbose_name=_("region codes pattern"))
postal_codes_pattern = models.CharField(
max_length=500, blank=True,
verbose_name=_("postal codes pattern"))
_postal_codes_min = models.CharField(max_length=100, blank=True, null=True)
_postal_codes_max = models.CharField(max_length=100, blank=True, null=True)
priority = models.IntegerField(
default=0,
verbose_name=_("priority"), help_text=_(
"Rules with same priority define added taxes (e.g. US taxes) "
"and rules with different priority define compound taxes "
"(e.g. Canada Quebec PST case)"))
override_group = models.IntegerField(
default=0,
verbose_name=_("override group number"), help_text=_(
"If several rules match, only the rules with the highest "
"override group number will be effective. This can be "
"used, for example, to implement tax exemption by adding "
"a rule with very high override group that sets a zero tax."))
tax = models.ForeignKey(Tax, on_delete=models.PROTECT, verbose_name=_('tax'))
objects = TaxRuleQuerySet.as_manager()
def matches(self, taxing_context):
"""
Check if this tax rule matches given taxing context.
:type taxing_context: shuup.core.taxing.TaxingContext
"""
if taxing_context.customer_tax_group:
tax_groups = set(self.customer_tax_groups.all())
if tax_groups:
if taxing_context.customer_tax_group not in tax_groups:
return False
if self.country_codes_pattern:
if not pattern_matches(self.country_codes_pattern, taxing_context.country_code):
return False
if self.region_codes_pattern:
if not pattern_matches(self.region_codes_pattern, taxing_context.region_code):
return False
if self.postal_codes_pattern:
if not pattern_matches(self.postal_codes_pattern, taxing_context.postal_code):
return False
return True
def save(self, *args, **kwargs):
if self.postal_codes_pattern:
min_value, max_value = Pattern(self.postal_codes_pattern).get_alphabetical_limits()
self._postal_codes_min = min_value
self._postal_codes_max = max_value
return super(TaxRule, self).save(*args, **kwargs)
def __str__(self):
return _("Tax rule {} ({})").format(self.pk, self.tax)
| agpl-3.0 | -5,399,539,020,065,074,000 | 40.773196 | 95 | 0.651037 | false | 3.960899 | false | false | false |
SalesforceEng/Providence | tests/repos/test_repotracker.py | 1 | 2927 | '''
Copyright (c) 2015, Salesforce.com, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Salesforce.com nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import unittest
import os
import json
import string
import random
import getpass
from datetime import datetime
from Empire.creds import CredentialManager
import config
configuration = config.Configuration()
credentials_file = configuration.get('credentials_file')
credential_key = os.environ.get('CREDENTIAL_KEY')
if credential_key is None:
credential_key = getpass.getpass('Credential Key:')
credential_manager = CredentialManager(credentials_file, credential_key)
config.credential_manager = credential_manager
from repos.repotracker import RepoTracker
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
class RepoTrackerTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_last_identifier(self):
repo = RepoTracker()
temp_identifier = id_generator()
repo.update_identifier("test-identifier", temp_identifier)
fetched_identifier = repo.last_identifier("test-identifier")
self.assertEqual(temp_identifier, fetched_identifier)
def test_last_run_completed(self):
repo = RepoTracker()
temp_last_run = datetime.utcnow()
repo.update_last_run_completed("test-last-run",temp_last_run)
fetched_last_run = repo.last_run_completed("test-last-run")
self.assertEqual(temp_last_run, fetched_last_run)
| bsd-3-clause | -3,761,808,230,218,270,000 | 45.460317 | 755 | 0.767339 | false | 4.49616 | true | false | false |
buxx/intelligine | intelligine/simulation/object/molecule/MoleculeGland.py | 1 | 1276 | from intelligine.core.exceptions import BestMoleculeHere, MoleculeGlandDisabled
from intelligine.simulation.molecule.DirectionMolecule import DirectionMolecule
class MoleculeGland():
def __init__(self, host, context):
self._molecule_type = None
self._host = host
self._context = context
self._enabled = False
def set_molecule_type(self, molecule_type):
self._molecule_type = molecule_type
def get_molecule_type(self):
if self._molecule_type is None:
raise Exception("molecule_type not specified")
return self._molecule_type
def get_molecule(self):
raise NotImplementedError()
def appose(self):
if not self._enabled:
raise MoleculeGlandDisabled()
try:
DirectionMolecule.appose(self._context,
self._host.get_position(),
self.get_molecule())
except BestMoleculeHere as best_molecule_here:
self._host.get_brain().set_distance_from_objective(best_molecule_here.get_best_distance())
def disable(self):
self._enabled = False
def enable(self):
self._enabled = True
def is_enabled(self):
return self._enabled | apache-2.0 | 1,086,530,290,361,724,900 | 29.404762 | 102 | 0.618339 | false | 4.4 | false | false | false |
jeliasherrero/SeminarioTheano2015 | logistic_reg.py | 1 | 1781 | import numpy
import theano
import theano.tensor as T
rng = numpy.random
N = 400
feats = 784
D = (rng.randn(N, feats).astype(theano.config.floatX),
rng.randint(size=N,low=0, high=2).astype(theano.config.floatX))
training_steps = 10000
# Declare Theano symbolic variables
x = T.matrix("x")
y = T.vector("y")
w = theano.shared(rng.randn(feats).astype(theano.config.floatX), name="w")
b = theano.shared(numpy.asarray(0., dtype=theano.config.floatX), name="b")
x.tag.test_value = D[0]
y.tag.test_value = D[1]
#print "Initial model:"
#print w.get_value(), b.get_value()
# Construct Theano expression graph
p_1 = 1 / (1 + T.exp(-T.dot(x, w)-b)) # Probability of having a one
prediction = p_1 > 0.5 # The prediction that is done: 0 or 1
xent = -y*T.log(p_1) - (1-y)*T.log(1-p_1) # Cross-entropy
cost = xent.mean() + 0.01*(w**2).sum() # The cost to optimize
gw,gb = T.grad(cost, [w,b])
# Compile expressions to functions
train = theano.function(
inputs=[x,y],
outputs=[prediction, xent],
updates={w:w-0.01*gw, b:b-0.01*gb},
name = "train")
predict = theano.function(inputs=[x], outputs=prediction,
name = "predict")
if any([x.op.__class__.__name__ in ['Gemv', 'CGemv', 'Gemm', 'CGemm'] for x in
train.maker.fgraph.toposort()]):
print 'Used the cpu'
elif any([x.op.__class__.__name__ in ['GpuGemm', 'GpuGemv'] for x in
train.maker.fgraph.toposort()]):
print 'Used the gpu'
else:
print 'ERROR, not able to tell if theano used the cpu or the gpu'
print train.maker.fgraph.toposort()
for i in range(training_steps):
pred, err = train(D[0], D[1])
#print "Final model:"
#print w.get_value(), b.get_value()
print "target values for D"
print D[1]
print "prediction on D"
print predict(D[0])
| gpl-2.0 | -1,169,153,566,435,132,400 | 29.706897 | 78 | 0.64009 | false | 2.800314 | false | false | false |
rorytrent/the-duke | duke/tile.py | 1 | 1517 | from .actions import parse_actions, parse_commands
class Tile(object):
def __init__(self, name, front=None, back=None,
front_commands=None, back_commands=None):
self.name = name
self.front = parse_actions(front)
self.back = parse_actions(back)
self.front_commands = parse_commands(front_commands)
self.back_commands = parse_commands(back_commands)
def __str__(self):
return self.name
def __repr__(self):
return '<Tile {}>'.format(self.name.upper())
def action(self, x, y, side):
actions = self.front if side == 'front' else self.back
return actions.get((x, y))
def command(self, x, y, side):
commands = self.front_commands if side == 'front' else self.back_commands
return commands.get((x, y))
def on_board(self, color):
return BoardTile(self, color)
class BoardTile(Tile):
def __init__(self, parent, color):
super(BoardTile, self).__init__(front=parent.front, back=parent.back,
front_commands=parent.front_commands,
back_commands=parent.back_commands)
self.color = color
self.side = 'front'
def flip(self):
self.side = 'back' if self.side == 'front' else 'front'
def action(self, x, y):
return super(BoardTile, self).action(x, y, self.side)
def command(self, x, y):
return super(BoardTile, self).command(x, y, self.side)
| gpl-3.0 | -8,379,709,236,508,500,000 | 30.604167 | 81 | 0.584047 | false | 3.764268 | false | false | false |
koditr/xbmc-tr-team-turkish-addons | plugin.video.temizlik.imandan.gelir/default.py | 1 | 7503 | import urllib,urllib2,re,xbmcplugin,xbmcgui
#!/usr/bin/python
# -*- coding: utf-8 -*-
XbmcTRteam='http://XbcmTR.com'
def CATEGORIES():
addDir('[COLOR red][B]>> INFOYU OKUYUNUZ <<[/B][/COLOR] ', "INFO(name)",'7','http://www.kanal23.com/dosya/unlem-1.jpg')
addDir('[COLOR orange][B]>> [/B][/COLOR]'+ '[COLOR beige][B]PACKAGES TEMIZLIGI - 1 -[/B][/COLOR] ', "MAINDEL(name)",'1','http://ryayla.com/uploads/images/okey.png')
addDir('[COLOR orange][B]>> [/B][/COLOR]'+ '[COLOR beige][B]CACHE TEMIZLIGI - 2 -[ Sadece PC ][/B][/COLOR] ', "MAINDEL2(name)",'2','http://ryayla.com/uploads/images/okey.png')
addDir('[COLOR yellow][B]>> [/B][/COLOR]'+ '[COLOR yellow][B]! Apple TV & Android BOX Cache Temizligi ![/B][/COLOR] ', "MAINDEL3(name)",'11','http://ryayla.com/uploads/images/okey.png')
def MAINDEL(name):
dialog = xbmcgui.Dialog()
ret = dialog.yesno('DreamTR Team UYARI', 'PACKAGES temizliginden Eminmisiniz ! ','','','No', 'Yes')
if ret:
import os
folder = xbmc.translatePath(os.path.join('special://home/addons/packages/', ''))
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception, e:
dialog = xbmcgui.Dialog(e)
i = dialog.ok('!!! Packages !!!', "[COLOR beige]Packages Temizliginiz Bitmistir[/COLOR]","[COLOR pink]iyi kullanimlar.[/COLOR]")
def MAINDEL2(name):
dialog = xbmcgui.Dialog()
ret = dialog.yesno('DreamTR Team UYARI', 'CACHE temizliginden Eminmisiniz ! ','','','No', 'Yes')
if ret:
import os
folder = xbmc.translatePath(os.path.join('special://home/cache', ''))
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception, e:
dialog = xbmcgui.Dialog(e)
i = dialog.ok('Temizlendi Uyarisi !!!', "[COLOR beige]Temizliginiz basariyla bitmistir[/COLOR]","[COLOR pink]iyi kullanimlar.[/COLOR]")
def MAINDEL3(name):
dialog = xbmcgui.Dialog()
ret = dialog.yesno('DreamTR Team UYARI', 'Apple TV & Android Box - CACHE temizliginden Eminmisiniz ! ','','','No', 'Yes')
if ret:
import os
folder = xbmc.translatePath(os.path.join('special://home/temp', ''))
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception, e:
dialog = xbmcgui.Dialog(e)
i = dialog.ok('Temizlendi Uyarisi !!!', "[COLOR beige]Temizliginiz basariyla bitmistir[/COLOR]","[COLOR pink]iyi kullanimlar.[/COLOR]")
def MAINDEL4(name):
dialog = xbmcgui.Dialog()
ret = dialog.yesno('!!Dream Sifrenizi Silmek icin Eminmisiniz !!', '! Dream Sifrelerinizi Silmek istediginizden Eminmisiniz !! ','','','No', 'Yes')
if ret:
import os
folder = xbmc.translatePath(os.path.join('special://home/userdata/addon_data/plugin.video.dream-clup', ''))
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception, e:
dialog = xbmcgui.Dialog(e)
i = dialog.ok('!Dream Sifreniz Silindi!', "[COLOR beige]Dream Sifreler Silindi ![/COLOR]","[COLOR pink]iyi kullanimlar.[/COLOR]")
def MAINDEL5(name):
dialog = xbmcgui.Dialog()
ret = dialog.yesno('!!MagicTR Sifrenizi Silmek icin Eminmisiniz !!', '! MagicTR Sifrelerinizi Silmek istediginizden Eminmisiniz !! ','','','No', 'Yes')
if ret:
import os
folder = xbmc.translatePath(os.path.join('special://home/userdata/addon_data/plugin.video.magicTR', ''))
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception, e:
dialog = xbmcgui.Dialog(e)
i = dialog.ok('!MagicTR Sifreniz Silindi!', "[COLOR beige]MagicTR Sifreler Silindi ![/COLOR]","[COLOR pink]iyi kullanimlar.[/COLOR]")
def INFO(url):
try:
CATEGORIES()
dialog = xbmcgui.Dialog()
i = dialog.ok(url, "[COLOR beige]XBMC daha hizli ve sorunsuz kullanmaniz icindir.[/COLOR]","[COLOR yellow]Bu Islemleri SIK SIK YAPINIZ !![/COLOR]")
except:
pass
def get_params():
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
def addLink(name,url,iconimage):
ok=True
liz=xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=url,listitem=liz)
return ok
def addDir(name,url,mode,iconimage):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
ok=True
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
return ok
params=get_params()
url=None
name=None
mode=None
try:
url=urllib.unquote_plus(params["url"])
except:
pass
try:
name=urllib.unquote_plus(params["name"])
except:
pass
try:
mode=int(params["mode"])
except:
pass
print "Mode: "+str(mode)
print "URL: "+str(url)
print "Name: "+str(name)
if mode==None or url==None or len(url)<1:
print ""
CATEGORIES()
elif mode==1:
print ""+url
MAINDEL(name)
elif mode==2:
print ""+url
MAINDEL2(name)
elif mode==11:
print ""+url
MAINDEL3(name)
elif mode==12:
print ""+url
MAINDEL4(name)
elif mode==13:
print ""+url
MAINDEL5(name)
elif mode==7:
print ""+url
INFO(url)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
| gpl-2.0 | -1,518,281,774,249,439,200 | 39 | 193 | 0.536585 | false | 3.391953 | false | false | false |
hoskerism/reefx | systemmonitor.py | 1 | 2341 | #!/user/bin/python
from datetime import datetime, timedelta
import os
from constants import MessageCodes, Sensors, Statuses, DebugLevels
import db
from workerthread import WorkerThread
class SystemMonitor(WorkerThread):
def __init__(self, inQueue, outQueue):
self.initTime = datetime.now()
super(SystemMonitor, self).__init__(inQueue, outQueue)
RUNTIME = 900
EXCEPTION_TIMEOUT = 60
FRIENDLY_NAME = "System Monitor"
def dowork(self):
diskSpace = self.readsensor(Sensors.DISK_SPACE)
if diskSpace < 100*10**6:
self.setstatus(Statuses.ALERT, "Diskspace {0}MB below 100MB".format(int(diskSpace/10**6)))
elif diskSpace < 1000*10**6:
self.setstatus(Statuses.WARNING, "Diskspace {0}MB below 1000MB".format(int(diskSpace/10**6)))
availableMemory = self.readsensor(Sensors.AVAILABLE_MEMORY)
if availableMemory < 30:
self.setstatus(Statuses.ALERT, "Available memory {0}% below 25%".format(availableMemory))
elif availableMemory < 50:
self.setstatus(Statuses.WARNING, "Available memory {0}% below 30%".format(availableMemory))
# TODO: We can add a CPU fan if necessary
cpuTemp = self.readsensor(Sensors.CPU_TEMP)
if cpuTemp > 70:
self.setstatus(Statuses.ALERT, "CPU temp {0} above 70".format(cpuTemp))
elif cpuTemp > 60:
self.setstatus(Statuses.WARNING, "CPU temp {0} above 60".format(cpuTemp))
def getcapabilities(self, request):
upTime = datetime.now() - self.initTime
self.sensorReadings['SYSTEM_UP_TIME'] = {
MessageCodes.VALUE:upTime,
MessageCodes.FRIENDLY_VALUE:self.formatTimeDelta(upTime),
MessageCodes.FRIENDLY_NAME:'System Up Time'
}
return super(SystemMonitor, self).getcapabilities(request)
def formatTimeDelta(self, upTime):
daysPart = "{0} day, ".format(upTime.days) if upTime.days == 1 else "{0} days, ".format(upTime.days)
timePart = timedelta(seconds = upTime.seconds)
out = daysPart + str(timePart)
return out
def setup(self):
"""Nothing to do"""
return
def teardown(self, message):
"""Nothing to do"""
return
| gpl-2.0 | 1,238,680,166,440,448,800 | 35.15873 | 108 | 0.626655 | false | 3.818923 | false | false | false |
ultra-lstm/RNA-GAN | cGAN/utils.py | 1 | 2258 | """
Some codes from https://github.com/Newmu/dcgan_code
"""
from __future__ import division
import math
import json
import random
import scipy.misc
import numpy as np
from time import gmtime, strftime
# -----------------------------
# new added functions for pix2pix
def load_data(image_path, image_size, input_c_dim, output_c_dim, is_train=False):
input_img = imread(image_path)
images = np.split(input_img, input_c_dim + output_c_dim, axis=1)
half_offset = 8
offset = half_offset * 2
hypersize = image_size + offset
fullsize = 512 + offset
h1 = int(np.ceil(np.random.uniform(1e-2, offset)))
w1 = int(np.ceil(np.random.uniform(1e-2, offset)))
conv = []
for image in images:
#print(image.shape)
top = int((fullsize - image.shape[1]) / 2)
bottom = fullsize - image.shape[1] - top
image = np.append(np.zeros((image.shape[0], top)), image, axis=1)
image = np.append(image, np.zeros((image.shape[0], bottom)), axis=1)
left = int((fullsize - image.shape[0]) / 2)
right = fullsize - image.shape[0] - left
image = np.append(np.zeros((left, image.shape[1])), image, axis=0)
image = np.append(image, np.zeros((right, image.shape[1])), axis=0)
tmp = scipy.misc.imresize(image, [hypersize, hypersize], interp='nearest')
if is_train:
image = tmp[h1:h1+image_size, w1:w1+image_size]
else:
image = tmp[half_offset:half_offset+image_size, half_offset:half_offset+image_size]
image = image/127.5 - 1.
conv.append(image)
return np.stack(conv, axis=2)
# -----------------------------
def save_images(images, size, image_path):
return imsave(inverse_transform(images), size, image_path)
def imread(path):
return scipy.misc.imread(path).astype(np.float)
def merge(images, size):
h, w = images.shape[1], images.shape[2]
img = np.zeros((h * size[0], w * size[1], 3))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j*h:j*h+h, i*w:i*w+w, :] = image
return img
def imsave(images, size, path):
return scipy.misc.imsave(path, merge(images, size))
def inverse_transform(images):
return (images+1.)/2.
| mit | 4,987,736,922,287,926,000 | 29.931507 | 95 | 0.605846 | false | 3.093151 | false | false | false |
closeio/nylas | migrations/versions/056_message_unique_constraint.py | 8 | 2132 | """ Remove duplicated Gmail Message objects and tighten constraints for Gmail messages.
Revision ID: 4b4c5579c083
Revises: 1925c535a52d
Create Date: 2014-07-17 00:01:09.410292
"""
# revision identifiers, used by Alembic.
revision = '4b4c5579c083'
down_revision = '4b4674f1a726'
from alembic import op
from sqlalchemy import func
def upgrade():
op.drop_constraint('messagecontactassociation_ibfk_1',
'messagecontactassociation', type_='foreignkey')
op.drop_constraint('messagecontactassociation_ibfk_2',
'messagecontactassociation', type_='foreignkey')
op.create_foreign_key('messagecontactassociation_ibfk_1',
'messagecontactassociation', 'contact',
['contact_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('messagecontactassociation_ibfk_2',
'messagecontactassociation', 'message',
['message_id'], ['id'], ondelete='CASCADE')
op.drop_constraint('imapuid_ibfk_2', 'imapuid', type_='foreignkey')
op.create_foreign_key('imapuid_ibfk_2', 'imapuid', 'message',
['message_id'], ['id'], ondelete='CASCADE')
from inbox.models import Message
from inbox.models.session import session_scope
with session_scope(versioned=False) \
as db_session:
groups = db_session.query(
Message.id, Message.thread_id, Message.g_msgid)\
.filter(~Message.g_msgid.is_(None))\
.group_by(Message.thread_id, Message.g_msgid).having(
func.count(Message.id) > 1).all()
for message_id, thread_id, g_msgid in groups:
print "deleting duplicates of ({}, {}), saving {}".format(
thread_id, g_msgid, message_id)
db_session.query(Message).filter(
Message.thread_id == thread_id,
Message.g_msgid == g_msgid,
Message.id != message_id).delete()
op.execute('ALTER TABLE message ADD UNIQUE INDEX ix_message_thread_id_g_msgid (thread_id, g_msgid)')
def downgrade():
pass
| agpl-3.0 | 2,814,679,746,964,648,400 | 38.481481 | 104 | 0.613039 | false | 3.883424 | false | false | false |
hellodata/wubacrawler | wubacrawler2_v1.1/process/start.py | 1 | 2178 | # -*- coding:utf-8 -*-
"""
start.py获取详细页html源码的程序
存放在数据库code_0507里面
start2.py对详细页html源码解析
存放在数据库code_0507里面
功能:先爬取……后解析……
"""
import redis
import time
from process.html_save import FetchDetailHtml
from process.list_page import SummaryFetchParse
def main():
"""
爬取详细页源码 存入code_0507数据库中
"""
r = redis.Redis(host = 'localhost', port = 6379, db = 1)
r.flushdb()
url_set = set()
#河南省 + 新疆省
#湖北省 + 湖南省 + 黑龙江省 + 吉林省 +广东省
#四川省 + 云南省 + 贵州省 + 陕西省
#城市简称列表
city_name_list = ['bt','chifeng','erds','hu','sjz','xj','changji','bygl','yili','aks',
'ks','hami','klmy','betl','tlf','ht',
'shz','kzls','ale','wjq','tmsk',
'ganzhou','nc','liuzhou','qinzhou','haikou',
'zz','luoyang','xx',]
#城市简称字典
city_dict = {'bt':24,'chifeng':32,'erds':13,'hu':48,'sjz':70,'xj':70,'changji':6,'bygl':7,'yili':5,'aks':6,
'ks':4,'hami':4,'klmy':4,'betl':4,'tlf':4,'ht':4,
'shz':6,'kzls':3,'ale':3,'wjq':3,'tmsk':3,
'ganzhou':31,'nc':70,'liuzhou':46,'qinzhou':5,'haikou':23,
'zz':70,'luoyang':28,'xx':12,}
while len(city_name_list) > 0:
city_name = city_name_list.pop()
print "======城市%s爬取开始======"%(city_name)
page_nums = city_dict.get(city_name)
page_num_start = 1
page_num_end = 2
while page_num_start <= page_nums:
p1 = SummaryFetchParse(r, url_set, page_num_start, page_num_end)
p1.get_city_list(city_name)
p2 = FetchDetailHtml(r, url_set)
p2.parser_to_save()
print "*********本次爬取结束***********"
time.sleep(60*1)
page_num_start = page_num_end + 1
page_num_end = page_num_end + 2
print "=====城市%s爬取结束====="%(city_name)
if __name__ == "__main__":
main()
| artistic-2.0 | 2,794,046,091,661,662,700 | 25.465753 | 111 | 0.495859 | false | 2.251748 | false | false | false |
Evanus/redstoner-utils | misc.py | 1 | 9100 | #pylint: disable = F0401
from helpers import *
from time import time as now
from time import sleep
from sys import exc_info
import thread
import org.bukkit.inventory.ItemStack as ItemStack
import org.bukkit.Bukkit as Bukkit
from basecommands import simplecommand
@hook.event("player.PlayerJoinEvent", "monitor")
def on_join(event):
"""
Welcome new players
"""
player = event.getPlayer()
# send welcome broadcast
if not player.hasPlayedBefore():
broadcast("utils.greet_new", "\n&a&lPlease welcome &f" + player.getDisplayName() + " &a<o Redstoner!\n")
# clear out some eventual crap before
msg(player, " \n \n \n \n \n \n \n \n \n \n \n \n ")
msg(player, " &4Welcome to the Redstoner Server!")
msg(player, " &6Before you ask us things, take a quick")
msg(player, " &6look at &a&nredstoner.com/info")
msg(player, " \n&6thank you and happy playing ;)")
msg(player, " \n \n")
# teleport to spawn when spawning inside portal
loginloc = player.getLocation().getBlock().getType()
headloc = player.getEyeLocation().getBlock().getType()
if "PORTAL" in [str(headloc), str(loginloc)]:
msg(player, "&4Looks like you spawned in a portal... Let me help you out")
msg(player, "&6You can use /back if you &nreally&6 want to go back")
player.teleport(player.getWorld().getSpawnLocation())
"""
This code fixes /up 0 destroying/replacing blocks in plots that are not yours.
If you use //up, this is caught by plotme and cancelled if you are not allowed to build.
However, if you use //up, WorldEdit does the following on "low" priority:
* Change the command to /up with the same arguments
* Run another event with /up but its cancelled (dunno why it does this)
Keep in mind that, on "lowest" priority, PlotMe might cancel events.
"""
dup = 0 #Used to store when someone used //up
@hook.event("player.PlayerCommandPreprocessEvent", "lowest")
def cmd_event(event):
global dup
if event.getMessage().split(" ")[0] in ("//up", "/worldedit:/up"):
dup = True
@hook.event("player.PlayerCommandPreprocessEvent", "normal")
def cmd_event2(event):
global dup
args = event.getMessage().split(" ")
if args[0].lower() in ("/up", "/worldedit:up"):
if dup: #If plotme cancelled this, it will not matter. This lets it through but PlotMe doesn't.
dup = False
elif not event.isCancelled():
event.setCancelled(True)
event.getPlayer().chat("//up " + " ".join(args[1:]))
""" Disabled while builder can't access Trusted
@hook.event("player.PlayerGameModeChangeEvent", "low")
def on_gamemode(event):
user = event.getPlayer()
if str(event.getNewGameMode()) != "SPECTATOR" and user.getWorld().getName() == "Trusted" and not user.hasPermission("mv.bypass.gamemode.Trusted"):
event.setCancelled(True)
"""
@hook.event("player.PlayerBedEnterEvent")
def on_bed_enter(event):
world = event.getPlayer().getWorld()
if world.getName() in ("Survival_1", "TrustedSurvival_1"):
for player in world.getPlayers():
player.setSleepingIgnored(True)
@hook.event("player.PlayerTeleportEvent")
def on_player_teleport(event):
"""
Disable spectator teleportation
"""
player = event.getPlayer()
if not event.isCancelled() and str(event.getCause()) == "SPECTATE" and not player.hasPermission("utils.tp.spectate"):
event.setCancelled(True)
msg(event.getPlayer(), "&cSpectator teleportation is disabled")
@hook.event("block.BlockFromToEvent", "highest")
def on_flow(event):
if event.isCancelled():
return
block = event.getToBlock()
if block.getWorld().getName() == "Creative" and rs_material_broken_by_flow(str(block.getType())):
event.setCancelled(True)
def rs_material_broken_by_flow(material):
if material in ("REDSTONE", "LEVER", "TRIPWIRE"):
return True
parts = material.split("_")
length = len(parts)
return length > 1 and (parts[0] == "DIODE" or parts[1] in ("TORCH", "WIRE", "BUTTON", "HOOK") or (length == 3 and parts[1] == "COMPARATOR"))
@simplecommand("sudo",
usage = "<player> [cmd..]",
description = "Makes <player> write [cmd..] in chat",
amin = 2,
helpNoargs = True)
def on_sudo_command(sender, command, label, args):
target = args[0]
cmd = " ".join(args[1:])
msg(sender, "&2[SUDO] &rRunning '&e%s&r' as &3%s" % (cmd, target))
is_cmd = cmd[0] == "/"
is_console = target.lower() in ["server", "console"]
if is_console:
server.dispatchCommand(server.getConsoleSender(), cmd[1:] if is_cmd else cmd)
return None
target_player = server.getPlayer(target)
if target_player and uid(target_player) not in pythoners:
target_player.chat(cmd)
return None
return "&cPlayer %s not found!" % target
@simplecommand("me",
usage = "[message..]",
description = "Sends a message in third person",
helpNoargs = True)
def on_me_command(sender, command, label, args):
text = colorify("&7- %s &7%s " % (sender.getDisplayName() if isinstance(sender, Player) else "&9CONSOLE", u"\u21E6"))
broadcast("utils.me", text + " ".join(args), usecolor = sender.hasPermission("essentials.chat.color"))
return None
@hook.command("pluginversions")
def on_pluginversions_command(sender, command, label, args):
"""
/pluginversions
print all plugins + versions; useful when updating plugins
"""
try:
plugin_header(sender, "Plugin versions")
plugins = [pl.getDescription() for pl in list(ArrayList(java_array_to_list(server.getPluginManager().getPlugins())))]
info(type(plugins[0]).__name__)
plugins.sort(key = lambda pl: pl.getDescription().getName())
msg(sender, "&3Listing all " + str(len(plugins)) + " plugins and their version:")
for plugin in plugins:
msg(sender, "&6" + pl.getDescription().getName() + "&r: &e" + pl.getDescription().getVersion())
return True
except:
error(trace())
@hook.command("echo")
def on_echo_command(sender, command, label, args):
"""
/echo
essentials echo sucks and prints mail alerts sometimes
"""
msg(sender, " ".join(args).replace("\\n", "\n"))
def eval_thread(sender, code):
"""
/pyeval
run python ingame
"""
try:
result = eval(code)
msg(sender, ">>> %s: %s" % (colorify("&3") + type(result).__name__, colorify("&a") + unicode(result) + "\n "), usecolor = False)
except:
e = exc_info()[1]
try:
eclass = e.__class__
except AttributeError:
eclass = type(e)
msg(sender, ">>> %s: %s" % (eclass.__name__, e) + "\n ", False, "c")
thread.exit()
pythoners = [
"e452e012-2c82-456d-853b-3ac8e6b581f5", # Nemes
"ae795aa8-6327-408e-92ab-25c8a59f3ba1", # jomo
"305ccbd7-0589-403e-a45b-d791dcfdee7d" # PanFritz
]
@simplecommand("pyeval",
usage = "[code..]",
description = "Runs python [code..] and returns the result",
helpNoargs = True)
def on_pyeval_command(sender, command, label, args):
if is_player(sender) and uid(sender) not in pythoners:
return noperm(sender)
msg(sender, " ".join(args), False, "e")
thread.start_new_thread(eval_thread, (sender, " ".join(args)))
return None
@simplecommand("tempadd",
usage = "<user> <group> [duration]",
description = "Temporarily adds <user> to <group> for \n[duration] minutes. Defaults to 1 week.",
helpNoargs = True,
helpSubcmd = True,
amin = 2,
amax = 3)
def tempadd_command(sender, command, label, args):
if not sender.hasPermission("permissions.manage.membership." + args[1]):
return "&cYou do not have permission to manage that group!"
if len(args) == 3:
if not args[2].isdigit():
return "&cThats not a number!"
duration = int(args[2]) * 60
else:
duration = 604800
if duration <= 0:
return "&cThats too short!"
cmd = "pex user %s group add %s * %s" % (args[0], args[1], duration)
runas(sender, cmd)
m, s = divmod(duration, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
return "&aAdded to group for %dd%dh%dm" % (d, h, m)
@hook.command("modules")
def on_modules_command(sender, command, label, args):
"""
/modules
list all modules, unloaded modules in red
"""
plugin_header(sender, "Modules")
msg(sender, ", ".join([(("&a" if mod in shared["modules"] else "&c") + mod) for mod in shared["load_modules"]]))
""" Something I'm planning for schematics
@hook.event("player.PlayerCommandPreprocessEvent", "low")
def on_command(event):
msg = " ".split(event.getMessage())
if len(msg) < 3:
return
if msg[0].lower() not in ("/schematic", "/schem"):
return
if msg[1].lower() not in ("save", "load"):
return
msg[2] = event.getPlayer().getName() + "/" + msg[2]
"""
| mit | -4,678,912,113,602,412,000 | 32.828996 | 150 | 0.621978 | false | 3.36414 | false | false | false |
tensorflow/probability | tensorflow_probability/python/internal/backend/numpy/ops.py | 1 | 22151 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Numpy implementations of TensorFlow functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import functools
# Dependency imports
import numpy as np
import numpy as onp # Avoid JAX rewrite. # pylint: disable=reimported
import six
from tensorflow_probability.python.internal.backend.numpy import _utils as utils
from tensorflow_probability.python.internal.backend.numpy import nest
from tensorflow_probability.python.internal.backend.numpy.gen import tensor_shape
try: # May not be available, not a core dep for TFP.
import wrapt # pylint: disable=g-import-not-at-top
except ImportError:
wrapt = None
__all__ = [
'bitcast',
'broadcast_dynamic_shape',
'broadcast_static_shape',
'broadcast_to',
'cast',
'clip_by_value',
'constant',
'control_dependencies',
'convert_to_tensor',
'custom_gradient',
'device',
'enable_v2_behavior',
'ensure_shape',
'executing_eagerly',
'get_static_value',
'identity',
'init_scope',
'is_tensor',
'name_scope',
'newaxis',
'register_tensor_conversion_function',
'stop_gradient',
'GradientTape',
'Module',
'Tensor',
'Variable',
# 'gradients',
]
JAX_MODE = False
if JAX_MODE:
import jax # pylint: disable=g-import-not-at-top
class _NullContext(object):
def __init__(self, *args, **kwargs):
pass
def __enter__(self):
pass
def __exit__(self, type_arg, value_arg, traceback_arg):
return False # False values do not suppress exceptions.
def _broadcast_static_shape(shape_x, shape_y):
"""Reimplements `tf.broadcast_static_shape` in JAX/NumPy."""
if (tensor_shape.TensorShape(shape_x).ndims is None or
tensor_shape.TensorShape(shape_y).ndims is None):
return tensor_shape.TensorShape(None)
shape_x = tuple(tensor_shape.TensorShape(shape_x).as_list())
shape_y = tuple(tensor_shape.TensorShape(shape_y).as_list())
try:
if JAX_MODE:
error_message = 'Incompatible shapes for broadcasting'
return tensor_shape.TensorShape(lax.broadcast_shapes(shape_x, shape_y))
error_message = ('shape mismatch: objects cannot be broadcast to'
' a single shape')
return tensor_shape.TensorShape(
np.broadcast(np.zeros(shape_x), np.zeros(shape_y)).shape)
except ValueError as e:
# Match TF error message
if error_message in str(e):
raise ValueError(
'Incompatible shapes for broadcasting: {} and {}'.format(
shape_x, shape_y))
raise
def _broadcast_dynamic_shape(shape_x, shape_y):
"""Reimplements `tf.broadcast_dynamic_shape` in JAX/NumPy."""
return convert_to_tensor(_broadcast_static_shape(shape_x, shape_y))
broadcast_shape = _broadcast_static_shape
def _constant(value, dtype=None, shape=None, name='Const'): # pylint: disable=unused-argument
x = convert_to_tensor(value, dtype=dtype)
if shape is None:
return x
if not x.shape:
return np.full(shape, x)
return np.reshape(x, shape)
def _control_dependencies(control_inputs):
if control_inputs:
for control in control_inputs:
if callable(control):
control()
return _NullContext()
tensor_conversion_registry = {}
def register_tensor_conversion_function(base_type, conversion_func):
# No priority system like TensorFlow yet
tensor_conversion_registry[base_type] = conversion_func
def _convert_to_tensor(value, dtype=None, dtype_hint=None, name=None): # pylint: disable=unused-argument
"""Emulates tf.convert_to_tensor."""
dtype = utils.numpy_dtype(dtype)
dtype_hint = utils.numpy_dtype(dtype_hint)
if is_tensor(value) and not isinstance(value, Variable):
# In NumPy mode, we are lenient on the dtype compatibility check because
# some codepaths rely on flexible conversion from int/float64 to 32.
if dtype is not None and value.dtype != dtype:
if JAX_MODE:
raise TypeError(('Tensor conversion requested dtype {} for array with '
'dtype {}: {}').format(dtype, value.dtype, value))
return value.astype(dtype)
return value
conversion_func = tensor_conversion_registry.get(type(value),
_default_convert_to_tensor)
ret = None
if dtype is None and dtype_hint is not None:
try:
ret = conversion_func(value, dtype=dtype_hint)
except (TypeError, ValueError):
pass
if ret is None:
ret = conversion_func(value, dtype=dtype)
return ret
def _infer_dtype(value, default_dtype):
"""Guesses an object's dtype."""
# Need to check for onp type first because onp types are subclasses of Python
# types.
if hasattr(value, 'dtype'):
# Duck-typing onp types
return value.dtype
elif isinstance(value, bool):
return np.bool_
elif isinstance(value, six.integer_types):
return np.int32
elif isinstance(value, float):
return np.float32
elif isinstance(value, complex):
return np.complex128
elif isinstance(value, (tuple, list)):
# Try inferring the type from items in the object if possible.
for v in nest.flatten(value):
if hasattr(v, 'dtype'):
return v.dtype
try: # Finally fall back to raw types (int, bool).
return _infer_dtype(value[0], default_dtype)
except (IndexError, TypeError):
return default_dtype
raise ValueError(('Attempt to convert a value ({})'
' with an unsupported type ({}) to a Tensor.').format(
value, type(value)))
class _Int64ToInt32Error(TypeError):
"""Error thrown when trying to convert an int64 to int32."""
def __init__(self, int_value):
self.int_value = int_value
super(_Int64ToInt32Error, self).__init__('Overflow when casting an int64 to'
' an int32.')
class _FloatToIntError(TypeError):
"""Error thrown when trying to convert a float to an int."""
def _is_int64(value):
return value > onp.iinfo(onp.int32).max or value < onp.iinfo(onp.int32).min
def _default_convert_to_tensor(value, dtype=None):
"""Default tensor conversion function for array, bool, int, float, and complex."""
inferred_dtype = _infer_dtype(value, np.float32)
# When a dtype is provided, we can go ahead and try converting to the dtype
# and force overflow/underflow if an int64 is converted to an int32.
if dtype is not None:
try:
return _default_convert_to_tensor_with_dtype(value, dtype)
except _Int64ToInt32Error as e:
# Force conversion to int32 if requested
return e.int_value
# If no dtype is provided, we try the inferred dtype and fallback to int64 or
# float32 depending on the type of conversion error we see.
try:
return _default_convert_to_tensor_with_dtype(value, inferred_dtype)
except _Int64ToInt32Error as e:
return np.array(value, dtype=np.int64)
except _FloatToIntError as e:
return np.array(value, dtype=np.float32)
class TypeConversionError(TypeError):
def __init__(self, value, dtype):
super(TypeConversionError, self).__init__(
'Cannot convert {} to array of dtype {}'.format(value, dtype))
class MixedTypesError(ValueError):
def __init__(self):
super(MixedTypesError, self).__init__('Can\'t convert Python sequence with'
' mixed types to Tensor.')
def _default_convert_to_tensor_with_dtype(value, dtype,
error_if_mismatch=False):
"""Converts a value to a tensor with a given dtype.
Args:
value: An object to be converted to tensor.
dtype: A NPTF dtype.
error_if_mismatch: Enables a stricter check for use when converting an
iterable from a tensor.
Returns:
A tensor.
Raises:
TypeConversionError: If type conversion fails.
MixedTypesError: If types are mismatched in an iterable context.
ValueError: If object isn't convertible to tensor.
_Int64ToInt32Error: If trying to convert an int64 to an int32.
_FloatToIntError: If trying to convert a float to an int.
"""
is_arraylike = hasattr(value, 'dtype')
if is_arraylike:
# Duck-typed for `onp.array`/`onp.generic`
arr = np.array(value)
if dtype is not None:
# arr.astype(None) forces conversion to float64
return arr.astype(dtype)
return arr
elif isinstance(value, complex):
dtype_compatible = np.issubdtype(dtype, np.complexfloating)
if not dtype_compatible:
if error_if_mismatch:
raise MixedTypesError()
raise TypeConversionError(value, dtype)
elif isinstance(value, bool):
# Bool check needs to happen before int check because bools are instances of
# int.
dtype_compatible = (dtype == np.bool_ or np.issubdtype(dtype, np.integer)
or np.issubdtype(dtype, np.floating))
if not dtype_compatible:
if error_if_mismatch:
raise MixedTypesError()
raise TypeError(value, dtype)
elif isinstance(value, six.integer_types):
if error_if_mismatch and not (np.issubdtype(dtype, np.integer)
or np.issubdtype(dtype, np.floating)):
raise MixedTypesError()
if dtype == np.int32 and _is_int64(value):
raise _Int64ToInt32Error(np.array(value, dtype=dtype))
if dtype == np.bool_:
# Can't downcast an int to a bool
raise TypeConversionError(value, dtype)
elif isinstance(value, float):
if error_if_mismatch and not (np.issubdtype(dtype, np.integer)
or np.issubdtype(dtype, np.floating)):
raise MixedTypesError()
if np.issubdtype(dtype, np.integer):
raise _FloatToIntError(
'Cannot convert {} to array of dtype {}'.format(value, dtype))
if not (np.issubdtype(dtype, np.floating)
or np.issubdtype(dtype, np.complexfloating)):
raise TypeConversionError(value, dtype)
else:
# Try to iterate through object and throw ValueError if we can't.
if hasattr(value, '__getitem__'):
ret = []
error_in_list = False
for v in value:
ret.append(_default_convert_to_tensor_with_dtype(
v, dtype, error_if_mismatch=error_in_list))
error_in_list = True
value = ret
else:
raise ValueError(
('Attempting to convert a value {} with an'
' unsupported type {} to a Tensor.').format(value, type(value)))
return np.array(value, dtype=dtype)
@contextlib.contextmanager
def _init_scope():
yield
# --- Begin Public Functions --------------------------------------------------
class GradientTape(object):
"""tf.GradientTape stub."""
def __init__(self, persistent=False, watch_accessed_variables=True): # pylint: disable=unused-argument
raise NotImplementedError('GradientTape not currently supported in JAX and '
'NumPy backends.')
def __enter__(self):
return self
def __exit__(self, typ, value, traceback): # pylint: disable=unused-argument
pass
def watch(self, tensor): # pylint: disable=unused-argument
pass
def gradient(self, target, sources, output_gradients=None, # pylint: disable=unused-argument
unconnected_gradients=None): # pylint: disable=unused-argument
raise NotImplementedError
def batch_jacobian(self, target, source, # pylint: disable=unused-argument
unconnected_gradients=None, # pylint: disable=unused-argument
parallel_iterations=None, experimental_use_pfor=True): # pylint: disable=unused-argument
raise NotImplementedError
bitcast = utils.copy_docstring(
'tf.bitcast',
lambda input, type, name=None: convert_to_tensor( # pylint: disable=g-long-lambda
input, dtype_hint=type).view(type))
broadcast_dynamic_shape = utils.copy_docstring(
'tf.broadcast_dynamic_shape', _broadcast_dynamic_shape)
broadcast_static_shape = utils.copy_docstring(
'tf.broadcast_static_shape', _broadcast_static_shape)
broadcast_to = utils.copy_docstring(
'tf.broadcast_to',
lambda input, shape, name=None: np.broadcast_to(input, shape))
def _cast(x, dtype):
x = np.asarray(x)
if (np.issubdtype(x.dtype, np.complexfloating) and
not np.issubdtype(dtype, np.complexfloating)):
x = np.real(x)
return x.astype(dtype)
cast = utils.copy_docstring(
'tf.cast',
lambda x, dtype, name=None: _cast(x, utils.numpy_dtype(dtype)))
clip_by_value = utils.copy_docstring(
'tf.clip_by_value',
lambda t, clip_value_min, clip_value_max, name=None: # pylint: disable=g-long-lambda
np.clip(t, clip_value_min, clip_value_max))
constant = utils.copy_docstring(
'tf.constant',
_constant)
control_dependencies = utils.copy_docstring(
'tf.control_dependencies',
_control_dependencies)
convert_to_tensor = utils.copy_docstring(
'tf.convert_to_tensor',
_convert_to_tensor)
def _custom_gradient(f):
"""JAX implementation of tf.custom_gradient."""
if not JAX_MODE:
# Numpy backend ignores custom gradients, so we do too.
return lambda *args, **kwargs: f(*args, **kwargs)[0]
@jax.custom_gradient
@functools.wraps(f)
def wrapped(*args, **kwargs):
value, vjp = f(*args, **kwargs)
def vjp_(cts_out):
cts_in = vjp(cts_out)
if isinstance(cts_in, list):
cts_in = tuple(cts_in)
return cts_in
return value, vjp_
return wrapped
custom_gradient = utils.copy_docstring(
'tf.custom_gradient', _custom_gradient)
device = lambda _: _NullContext()
def _ensure_shape(x, shape, name=None): # pylint: disable=unused-argument
x_shape = tensor_shape.TensorShape(x.shape)
shape = tensor_shape.TensorShape(shape)
if not shape.is_compatible_with(x_shape):
msg = 'Shape of tensor x {} is not compatible with expected shape {}'
raise ValueError(msg.format(x_shape, shape))
return x
ensure_shape = utils.copy_docstring(
'tf.ensure_shape', _ensure_shape)
executing_eagerly = utils.copy_docstring(
'tf.executing_eagerly',
lambda: True)
def _get_static_value_jax(tensor, partial=False):
"""JAX implementation of tf.get_static_value."""
del partial
if isinstance(tensor, jax.core.Tracer):
return None
if isinstance(tensor, NumpyVariable):
return None
if isinstance(tensor, Module):
return None
if isinstance(tensor, np.ndarray):
return onp.array(tensor)
return tensor
def _get_static_value_numpy(tensor, partial=False):
"""NumPy implementation of tf.get_static_value."""
del partial
if isinstance(tensor, NumpyVariable):
return None
if isinstance(tensor, Module):
return None
return tensor
get_static_value = utils.copy_docstring(
'tf.get_static_value',
_get_static_value_jax if JAX_MODE else _get_static_value_numpy)
identity = utils.copy_docstring(
'tf.identity',
lambda input, name=None: np.array(input))
is_tensor = utils.copy_docstring(
'tf.is_tensor',
lambda x: isinstance(x, Tensor))
init_scope = utils.copy_docstring('tf.init_scope', _init_scope)
class name_scope(object): # pylint: disable=invalid-name
"""A context manager for use when defining a Python op.
This context manager pushes a name scope, which will make the name of all
operations added within it have a prefix.
For example, to define a new Python op called `my_op`:
```python
def my_op(a, b, c, name=None):
with tf.name_scope("MyOp") as scope:
a = tf.convert_to_tensor(a, name="a")
b = tf.convert_to_tensor(b, name="b")
c = tf.convert_to_tensor(c, name="c")
# Define some computation that uses `a`, `b`, and `c`.
return foo_op(..., name=scope)
```
When executed, the Tensors `a`, `b`, `c`, will have names `MyOp/a`, `MyOp/b`,
and `MyOp/c`.
If the scope name already exists, the name will be made unique by appending
`_n`. For example, calling `my_op` the second time will generate `MyOp_1/a`,
etc.
"""
@property
def name(self):
return self._name
def __init__(self, name, *args, **kwargs):
del args, kwargs
self._name = name
def __enter__(self):
return self._name
def __exit__(self, type_arg, value_arg, traceback_arg):
return False # False values do not suppress exceptions.
newaxis = np.newaxis
if JAX_MODE:
from jax import lax # pylint: disable=g-import-not-at-top
stop_gradient = utils.copy_docstring(
'tf.stop_gradient',
lambda input, name=None: lax.stop_gradient(input))
else:
stop_gradient = utils.copy_docstring(
'tf.stop_gradient',
lambda input, name=None: np.array(input))
def _convert_tensorshape_to_tensor(value, dtype=None):
"""Copied from TF's TensorShape conversion."""
if not value.is_fully_defined():
raise ValueError(
'Cannot convert a partially known TensorShape to a Tensor: {}'.format(
value))
value_list = value.as_list()
int64_value = 0
for dim in value_list:
if dim >= 2**31:
int64_value = dim
break
if dtype is not None:
if dtype not in (np.int32, np.int64):
raise TypeConversionError(value, dtype)
if dtype == np.int32 and int64_value:
raise ValueError('Cannot convert a TensorShape to dtype int32; '
'a dimension is too large ({})'.format(int64_value))
else:
dtype = np.int64 if int64_value else np.int32
return convert_to_tensor(value_list, dtype=dtype)
register_tensor_conversion_function(tensor_shape.TensorShape,
_convert_tensorshape_to_tensor)
def _convert_dimension_to_tensor(value, dtype=None):
dtype = dtype or np.int32
if dtype not in (np.int32, np.int64):
raise TypeConversionError(value, dtype)
return convert_to_tensor(tensor_shape.dimension_value(value), dtype=dtype)
register_tensor_conversion_function(tensor_shape.Dimension,
_convert_dimension_to_tensor)
class NumpyVariable(getattr(wrapt, 'ObjectProxy', object)):
"""Stand-in for tf.Variable."""
__slots__ = ('initializer',)
# pylint: disable=unused-argument
def __init__(
self,
initial_value=None,
trainable=True,
validate_shape=True,
caching_device=None,
name=None,
variable_def=None,
dtype=None,
import_scope=None,
constraint=None,
shape=None):
assert constraint is None
v = convert_to_tensor(initial_value)
if dtype is not None:
v = v.astype(utils.numpy_dtype(dtype))
super(NumpyVariable, self).__init__(v)
self._self_name = name
self.initializer = None
# pylint: enable=unused-argument
@property
def name(self):
return self._self_name if self._self_name is not None else str(id(self))
def __array__(self, dtype=None):
if dtype is not None:
dtype = utils.numpy_dtype(dtype)
return self.__wrapped__.__array__(dtype)
# Passing in dtype=None to __array__ has differing behavior in numpy.
# When an `np.ndarray` has `.__array__(None)` invoked, the array is casted
# to `float64`. Thus we handle this case separately.
return self.__wrapped__.__array__()
def assign(self, value, **_):
super(NumpyVariable, self).__init__(onp.array(value, dtype=self.dtype))
return self
def assign_add(self, value, **_):
super(NumpyVariable, self).__init__(
onp.array(self, dtype=self.dtype) + onp.array(value, dtype=self.dtype))
return self
def assign_sub(self, value, **_):
super(NumpyVariable, self).__init__(
onp.array(self, dtype=self.dtype) - onp.array(value, dtype=self.dtype))
return self
if JAX_MODE:
jax.interpreters.xla.canonicalize_dtype_handlers[NumpyVariable] = (
jax.interpreters.xla.canonicalize_dtype_handlers[onp.ndarray])
jax.interpreters.xla.pytype_aval_mappings[NumpyVariable] = (
jax.interpreters.xla.pytype_aval_mappings[onp.ndarray])
jax.core.pytype_aval_mappings[NumpyVariable] = (
jax.core.pytype_aval_mappings[onp.ndarray])
def _convert_variable_to_tensor(value, dtype=None):
return convert_to_tensor(value.__wrapped__, dtype=dtype)
register_tensor_conversion_function(NumpyVariable, _convert_variable_to_tensor)
Variable = NumpyVariable
class _TensorMeta(type(np.ndarray)):
@classmethod
def __instancecheck__(cls, instance):
if JAX_MODE:
return isinstance(instance, (jax.xla.DeviceArray,
jax.core.Tracer))
return isinstance(instance, np.ndarray)
class Tensor(six.with_metaclass(_TensorMeta)):
OVERLOADABLE_OPERATORS = frozenset((
# Binary.
'__add__',
'__radd__',
'__sub__',
'__rsub__',
'__mul__',
'__rmul__',
'__truediv__',
'__rtruediv__',
'__floordiv__',
'__rfloordiv__',
'__mod__',
'__rmod__',
'__lt__',
'__le__',
'__gt__',
'__ge__',
'__ne__',
'__eq__',
'__and__',
'__rand__',
'__or__',
'__ror__',
'__xor__',
'__rxor__',
'__getitem__',
'__pow__',
'__rpow__',
# Unary.
'__invert__',
'__neg__',
'__abs__',
'__matmul__',
'__rmatmul__'
))
class Module(object):
"""tf.Module."""
_TF_MODULE_IGNORED_PROPERTIES = frozenset()
def __init__(self, name):
self._name = name
def _no_dependency(self, x):
return x
@property
def trainable_variables(self):
return []
@property
def variables(self):
return []
enable_v2_behavior = lambda: None
| apache-2.0 | -8,286,430,538,326,064,000 | 29.98042 | 110 | 0.652612 | false | 3.669814 | false | false | false |
kshedstrom/pyroms | examples/cobalt-preproc/Clim_bio/make_clim_file_bio_addons.py | 1 | 4460 | import subprocess
import os
import sys
import commands
import numpy as np
import pyroms
import pyroms_toolbox
from remap_bio_woa import remap_bio_woa
from remap_bio_glodap import remap_bio_glodap
data_dir_woa = '/archive/u1/uaf/kate/COBALT/'
data_dir_glodap = '/archive/u1/uaf/kate/COBALT/'
dst_dir='./'
src_grd = pyroms_toolbox.BGrid_GFDL.get_nc_BGrid_GFDL('/archive/u1/uaf/kate/COBALT/GFDL_CM2.1_grid.nc', name='ESM2M_NWGOA3')
dst_grd = pyroms.grid.get_ROMS_grid('NWGOA3')
# define all tracer stuff
list_tracer = ['alk', 'cadet_arag', 'cadet_calc', 'dic', 'fed', 'fedet', 'fedi', 'felg', 'fesm', 'ldon', 'ldop', 'lith', 'lithdet', 'nbact', 'ndet', 'ndi', 'nlg', 'nsm', 'nh4', 'no3', 'o2', 'pdet', 'po4', 'srdon', 'srdop', 'sldon', 'sldop', 'sidet', 'silg', 'sio4', 'nsmz', 'nmdz', 'nlgz']
tracer_longname = ['Alkalinity', 'Detrital CaCO3', 'Detrital CaCO3', 'Dissolved Inorganic Carbon', 'Dissolved Iron', 'Detrital Iron', 'Diazotroph Iron', 'Large Phytoplankton Iron', 'Small Phytoplankton Iron', 'labile DON', 'labile DOP', 'Lithogenic Aluminosilicate', 'lithdet', 'bacterial', 'ndet', 'Diazotroph Nitrogen', 'Large Phytoplankton Nitrogen', 'Small Phytoplankton Nitrogen', 'Ammonia', 'Nitrate', 'Oxygen', 'Detrital Phosphorus', 'Phosphate', 'Semi-Refractory DON', 'Semi-Refractory DOP', 'Semilabile DON', 'Semilabile DOP', 'Detrital Silicon', 'Large Phytoplankton Silicon', 'Silicate', 'Small Zooplankton Nitrogen', 'Medium-sized zooplankton Nitrogen', 'large Zooplankton Nitrogen']
tracer_units = ['mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'g/kg', 'g/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg']
#------- WOA13 ---------------------------------
id_tracer_update_woa = [19,20,22,29]
list_tracer_update_woa = []
tracer_longname_update_woa = []
tracer_units_update_woa = []
for idtra in id_tracer_update_woa:
print list_tracer[idtra]
for idtra in id_tracer_update_woa:
# add to tracer update
list_tracer_update_woa.append(list_tracer[idtra])
tracer_longname_update_woa.append(tracer_longname[idtra])
tracer_units_update_woa.append(tracer_units[idtra])
for mm in np.arange(12):
clim_file = dst_dir + dst_grd.name + '_clim_bio_GFDL+WOA+GLODAP_m' + str(mm+1).zfill(2) + '.nc'
print '\nBuild CLIM file for month', mm
for ktr in np.arange(len(list_tracer_update_woa)):
ctra = list_tracer_update_woa[ktr]
if ctra == 'sio4':
ctra = 'si'
mydict = {'tracer':list_tracer_update_woa[ktr],'longname':tracer_longname_update_woa[ktr],'units':tracer_units_update_woa[ktr],'file':data_dir_woa + ctra + '_WOA13-CM2.1_monthly.nc', \
'frame':mm}
remap_bio_woa(mydict, src_grd, dst_grd, dst_dir=dst_dir)
out_file = dst_dir + dst_grd.name + '_clim_bio_' + list_tracer_update_woa[ktr] + '.nc'
command = ('ncks', '-a', '-A', out_file, clim_file)
subprocess.check_call(command)
os.remove(out_file)
#--------- GLODAP -------------------------------
id_tracer_update_glodap = [0,3]
list_tracer_update_glodap = []
tracer_longname_update_glodap = []
tracer_units_update_glodap = []
for idtra in id_tracer_update_glodap:
print list_tracer[idtra]
for idtra in id_tracer_update_glodap:
# add to tracer update
list_tracer_update_glodap.append(list_tracer[idtra])
tracer_longname_update_glodap.append(tracer_longname[idtra])
tracer_units_update_glodap.append(tracer_units[idtra])
for mm in np.arange(12):
clim_file = dst_dir + dst_grd.name + '_clim_bio_GFDL+WOA+GLODAP_m' + str(mm+1).zfill(2) + '.nc'
print '\nBuild CLIM file for month', mm
for ktr in np.arange(len(list_tracer_update_glodap)):
ctra = list_tracer_update_glodap[ktr]
mydict = {'tracer':list_tracer_update_glodap[ktr],'longname':tracer_longname_update_glodap[ktr],'units':tracer_units_update_glodap[ktr],'file':data_dir_glodap + ctra + '_GLODAP-ESM2M_annual.nc', \
'frame':mm}
remap_bio_glodap(mydict, src_grd, dst_grd, dst_dir=dst_dir)
out_file = dst_dir + dst_grd.name + '_clim_bio_' + list_tracer_update_glodap[ktr] + '.nc'
command = ('ncks', '-a', '-A', out_file, clim_file)
subprocess.check_call(command)
os.remove(out_file)
| bsd-3-clause | 6,384,273,527,730,413,000 | 51.470588 | 695 | 0.64148 | false | 2.505618 | false | false | false |
Seeker1911/talent | server/talent/migrations/0002_auto_20160919_1522.py | 1 | 1432 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-19 15:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('talent', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='musicians',
name='bio',
field=models.TextField(blank=True, default=None, max_length=500, null=True),
),
migrations.AlterField(
model_name='musicians',
name='company',
field=models.CharField(blank=True, default=None, max_length=200, null=True),
),
migrations.AlterField(
model_name='musicians',
name='genre',
field=models.CharField(blank=True, default=None, max_length=200, null=True),
),
migrations.AlterField(
model_name='musicians',
name='location',
field=models.CharField(blank=True, default=None, max_length=30, null=True),
),
migrations.AlterField(
model_name='musicians',
name='phone',
field=models.CharField(default=None, max_length=15, null=True),
),
migrations.AlterField(
model_name='musicians',
name='social',
field=models.CharField(blank=True, default=None, max_length=200, null=True),
),
]
| mit | 4,036,881,197,054,677,000 | 30.822222 | 88 | 0.569832 | false | 4.045198 | false | false | false |
CityofPittsburgh/pittsburgh-purchasing-suite | purchasing_test/integration/conductor/test_conductor.py | 1 | 39808 | # -*- coding: utf-8 -*-
import json
import datetime
import urllib2
from mock import Mock, patch
from flask import session
from werkzeug.datastructures import ImmutableMultiDict
from purchasing.users.models import User
from purchasing.data.contracts import ContractBase
from purchasing.data.contract_stages import ContractStage, ContractStageActionItem
from purchasing.data.stages import Stage
from purchasing.data.flows import Flow
from purchasing.opportunities.models import Opportunity
from purchasing.extensions import mail
from purchasing.conductor.util import assign_a_contract
from purchasing_test.factories import ContractTypeFactory, DepartmentFactory, CategoryFactory
from purchasing_test.test_base import BaseTestCase
from purchasing_test.util import (
insert_a_contract, insert_a_stage, insert_a_flow,
insert_a_role, insert_a_user
)
class TestConductorSetup(BaseTestCase):
def setUp(self):
super(TestConductorSetup, self).setUp()
# create a conductor and general staff person
self.county_type = ContractTypeFactory.create(**{
'name': 'County', 'allow_opportunities': True, 'managed_by_conductor': True
})
self.department = DepartmentFactory.create(**{'name': 'test department'})
self.conductor_role_id = insert_a_role('conductor')
self.staff_role_id = insert_a_role('staff')
self.conductor = insert_a_user(email='[email protected]', role=self.conductor_role_id)
self.staff = insert_a_user(email='[email protected]', role=self.staff_role_id)
self.conductor2 = insert_a_user(email='[email protected]', role=self.conductor_role_id)
# create three stages, and set up a flow between them
self.stage1 = insert_a_stage(
name='stage1', send_notifs=True, post_opportunities=True,
default_message='i am a default message'
)
self.stage2 = insert_a_stage(name='stage2', send_notifs=True, post_opportunities=False)
self.stage3 = insert_a_stage(name='stage3', send_notifs=False, post_opportunities=False)
self.flow = insert_a_flow(stage_ids=[self.stage1.id, self.stage2.id, self.stage3.id])
self.flow2 = insert_a_flow(name='test2', stage_ids=[self.stage1.id, self.stage3.id, self.stage2.id])
self.simple_flow = insert_a_flow(name='simple', stage_ids=[self.stage1.id])
# create two contracts
self.contract1 = insert_a_contract(
contract_type=self.county_type, description='scuba supplies', financial_id=123,
expiration_date=datetime.date.today(), properties=[{'key': 'Spec Number', 'value': '123'}],
is_visible=True, department=self.department, has_metrics=True
)
self.contract2 = insert_a_contract(
contract_type=self.county_type, description='scuba repair', financial_id=456,
expiration_date=datetime.date.today() + datetime.timedelta(120),
properties=[{'key': 'Spec Number', 'value': '456'}],
is_visible=True, has_metrics=True
)
self.category = CategoryFactory.create()
self.login_user(self.conductor)
self.detail_view = '/conductor/contract/{}/stage/{}'
self.transition_view = '/conductor/contract/{}/stage/{}/'
def assign_contract(self, flow=None, contract=None, start_time=None):
flow = flow if flow else self.flow
contract = contract if contract else self.contract1
start_time = start_time if start_time else datetime.datetime.now()
assign_a_contract(contract, flow, self.conductor, start_time=start_time)
return contract.children[0]
def get_current_contract_stage_id(self, contract, old_stage=None):
if contract.current_stage_id is None:
return -1
if not old_stage:
stage = ContractStage.query.filter(
contract.current_stage_id == ContractStage.stage_id,
contract.id == ContractStage.contract_id
).first()
else:
stage = ContractStage.query.filter(
old_stage.id == ContractStage.stage_id,
contract.id == ContractStage.contract_id
).first()
return stage.id
def build_detail_view(self, contract, old_stage=None):
contract = contract.children[0] if len(contract.children) > 0 else contract
return self.detail_view.format(
contract.id, self.get_current_contract_stage_id(contract, old_stage)
)
def tearDown(self):
super(TestConductorSetup, self).tearDown()
session.clear()
class TestConductor(TestConductorSetup):
render_templates = True
def test_conductor_contract_list(self):
index_view = self.client.get('/conductor', follow_redirects=True)
self.assert200(index_view)
self.assert_template_used('conductor/index.html')
# we have 2 contracts
_all = self.get_context_variable('_all')
self.assertEquals(len(_all), 2)
# we can't get to the page normally
self.logout_user()
index_view = self.client.get('/conductor', follow_redirects=True)
self.assert200(index_view)
# it should redirect us to the home page
self.assert_template_used('public/home.html')
self.login_user(self.staff)
index_view = self.client.get('/conductor', follow_redirects=True)
self.assert200(index_view)
# it should redirect us to the home page
self.assert_template_used('public/home.html')
def test_conductor_start_new(self):
self.assertEquals(ContractStage.query.count(), 0)
self.assert200(self.client.get('/conductor/contract/new'))
self.client.post('/conductor/contract/new', data={
'description': 'totally new wow', 'flow': self.flow.id,
'assigned': self.conductor.id, 'department': self.department.id
})
self.assertEquals(ContractStage.query.count(), len(self.flow.stage_order))
self.assertEquals(
ContractBase.query.filter(ContractBase.description == 'totally new wow').first().current_stage_id,
self.flow.stage_order[0]
)
self.assertEquals(ContractBase.query.count(), 3)
def test_conductor_start_existing(self):
start_work_url = '/conductor/contract/{}/start'.format(self.contract1.id)
old_contract_id = self.contract1.id
old_description = self.contract1.description
self.assertEquals(ContractStage.query.count(), 0)
self.assert200(self.client.get(start_work_url))
self.assertEquals(self.get_context_variable('form').description.data, self.contract1.description)
self.client.post(start_work_url, data={
'description': 'updated!', 'flow': self.flow.id,
'assigned': self.conductor.id, 'department': self.department.id
})
old_contract = ContractBase.query.get(old_contract_id)
new_contract = ContractBase.query.get(old_contract_id).children[0]
self.assertEquals(old_contract.description, old_description)
self.assertEquals(new_contract.description, 'updated!')
self.assertEquals(ContractStage.query.count(), len(self.flow.stage_order))
self.assertEquals(new_contract.current_stage_id, self.flow.stage_order[0])
self.assertEquals(ContractBase.query.count(), 3)
def test_conductor_modify_start(self):
assign = self.assign_contract()
start_url = '/conductor/contract/{}/start'.format(self.contract1.id)
# should load successfully on first stage
self.assert200(self.client.get(start_url))
self.client.post(start_url, data={
'description': 'totally new wow', 'flow': self.flow.id,
'assigned': self.conductor.id, 'department': self.department.id,
'start': datetime.datetime.now() - datetime.timedelta(1)
})
transition_url = self.build_detail_view(assign) + '/transition'
self.client.get(transition_url)
# should redirect on not-first stage
self.assertTrue(self.client.get(start_url).status_code, 302)
def test_conductor_contract_assign(self):
self.assertEquals(ContractStage.query.count(), 0)
assign = self.assign_contract()
self.assertEquals(ContractStage.query.count(), len(self.flow.stage_order))
self.assertEquals(assign.current_stage_id, self.flow.stage_order[0])
self.assertEquals(assign.assigned_to, self.conductor.id)
# re-assigning shouldn't cause problems
self.assign_contract()
def test_conductor_assign_unstarted_contract(self):
self.client.get('/conductor/contract/{}/assign/{}'.format(
self.contract1.id, self.staff.id
))
self.assert_flashes(
'That user does not have the right permissions to be assigned a contract', 'alert-danger'
)
self.assertTrue(self.contract1.assigned is None)
self.client.get('/conductor/contract/{}/assign/{}'.format(
self.contract1.id, self.conductor.id
))
self.assert_flashes(
'Successfully assigned {} to {}!'.format(self.contract1.description, self.conductor.email),
'alert-success'
)
self.assertTrue(self.contract1.assigned is not None)
self.assertEquals(self.contract1.assigned, self.conductor)
def test_conductor_reassign_in_progress(self):
self.assign_contract(contract=self.contract1)
self.client.get('/conductor/contract/{}/assign/{}'.format(
self.contract1.id, self.conductor2.id
))
self.assert_flashes(
'Successfully assigned {} to {}!'.format(self.contract1.description, self.conductor2.email),
'alert-success'
)
self.assertTrue(self.contract1.assigned is not None)
self.assertEquals(self.contract1.assigned, self.conductor2)
def test_conductor_contract_detail_view(self):
self.assert404(self.client.get(self.detail_view.format(999, 999)))
assign = self.assign_contract()
detail_view_url = self.build_detail_view(assign)
detail = self.client.get(self.build_detail_view(assign))
self.assert200(detail)
self.assert_template_used('conductor/detail.html')
self.assertEquals(self.get_context_variable('active_tab'), '#activity')
self.assertEquals(
self.get_context_variable('current_stage').id,
self.get_context_variable('active_stage').id
)
self.assertEquals(len(self.get_context_variable('actions')), 1)
# make sure the redirect works
redir = self.client.get('/conductor/contract/{}'.format(assign.id))
self.assertEquals(redir.status_code, 302)
self.assertEquals(redir.location, 'http://localhost' + detail_view_url)
self.logout_user()
# make sure we can't get to it unless we are the right user
detail = self.client.get(detail_view_url, follow_redirects=True)
self.assert200(detail)
# it should redirect us to the home page
self.assert_template_used('public/home.html')
self.login_user(self.staff)
detail = self.client.get(detail_view_url, follow_redirects=True)
self.assert200(detail)
# it should redirect us to the home page
self.assert_template_used('public/home.html')
def test_conductor_contract_transition(self):
assign = self.assign_contract()
transition_url = self.build_detail_view(assign) + '/transition'
transition = self.client.get(transition_url)
self.assertEquals(transition.status_code, 302)
new_page = self.client.get(self.build_detail_view(assign))
self.assertTrue('<a href="#post" aria-controls="post" role="tab" data-toggle="tab">' not in new_page.data)
contract_stages = ContractStage.query.all()
for stage in contract_stages:
if stage.stage_id == self.stage1.id:
self.assertTrue(stage.entered is not None and stage.exited is not None)
elif stage.stage_id == self.stage2.id:
self.assertTrue(stage.entered is not None and stage.exited is None)
elif stage.stage_id == self.stage3.id:
self.assertTrue(stage.entered is None and stage.exited is None)
def test_conductor_auto_fix_dates(self):
two_days_ago = datetime.datetime.now() - datetime.timedelta(days=2)
assign = self.assign_contract(start_time=two_days_ago)
transition_url = self.build_detail_view(assign) + '/transition'
self.client.post(transition_url, data={'complete': two_days_ago})
self.client.post(transition_url, data={'complete': two_days_ago})
revert_url = self.build_detail_view(assign) + '/transition?destination={}'
self.client.post(revert_url.format(self.stage2.id), data={
'complete': datetime.datetime.now() - datetime.timedelta(days=1)
})
contract_stage_1 = ContractStage.query.filter(ContractStage.stage_id == self.stage1.id).first()
contract_stage_2 = ContractStage.query.filter(ContractStage.stage_id == self.stage2.id).first()
self.assertNotEquals(contract_stage_1.exited, contract_stage_2.entered)
self.client.get(transition_url)
self.assertEquals(contract_stage_1.exited, contract_stage_2.entered)
def test_conductor_transition_complete_date_validation(self):
assign = self.assign_contract()
transition_url = self.build_detail_view(assign) + '/transition'
early = self.client.post(transition_url, data={
'complete': datetime.datetime.now() - datetime.timedelta(days=1)
}, follow_redirects=True)
self.assertTrue('Invalid date (before step start)' in early.data)
late = self.client.post(transition_url, data={
'complete': datetime.datetime.now() + datetime.timedelta(days=1)
}, follow_redirects=True)
self.assertTrue('Invalid date (in future)' in late.data)
contract_stages = ContractStage.query.all()
for stage in contract_stages:
if stage.id == self.stage1.id:
self.assertTrue(stage.entered is not None and stage.exited is not None)
elif stage.id == self.stage2.id:
self.assertTrue(stage.entered is not None and stage.exited is None)
elif stage.id == self.stage3.id:
self.assertTrue(stage.entered is None and stage.exited is None)
def test_conductor_directed_transition(self):
assign = self.assign_contract()
self.assertEquals(ContractStageActionItem.query.count(), 1)
# transition to the third stage
transition_url = self.build_detail_view(assign) + '/transition'
self.client.get(transition_url)
self.assertEquals(ContractStageActionItem.query.count(), 3)
self.client.get(transition_url)
self.assertEquals(ContractStageActionItem.query.count(), 5)
self.assertEquals(assign.current_stage_id, self.stage3.id)
revert_url = self.build_detail_view(assign) + '/transition?destination={}'
# revert to the original stage
self.client.get(revert_url.format(self.stage1.id))
self.assertEquals(ContractStageActionItem.query.count(), 6)
self.assertEquals(assign.current_stage_id, self.stage1.id)
self.assertTrue(ContractStage.query.filter(ContractStage.stage_id == self.stage1.id).first().entered is not None)
self.assertTrue(ContractStage.query.filter(ContractStage.stage_id == self.stage2.id).first().entered is None)
self.assertTrue(ContractStage.query.filter(ContractStage.stage_id == self.stage3.id).first().entered is None)
self.assertTrue(ContractStage.query.filter(ContractStage.stage_id == self.stage1.id).first().exited is None)
self.assertTrue(ContractStage.query.filter(ContractStage.stage_id == self.stage2.id).first().exited is None)
self.assertTrue(ContractStage.query.filter(ContractStage.stage_id == self.stage3.id).first().exited is None)
def test_conductor_link_directions(self):
assign = self.assign_contract()
self.client.get(self.detail_view.format(assign.id, assign.get_current_stage().id) + '/transition')
# assert the current stage is stage 2
redir = self.client.get('/conductor/contract/{}'.format(assign.id))
self.assertEquals(redir.status_code, 302)
self.assertEquals(redir.location, 'http://localhost' + self.build_detail_view(assign))
# assert we can/can't go the correct locations
old_view = self.client.get(self.build_detail_view(assign, old_stage=self.stage1))
self.assert200(old_view)
self.assertTrue('This stage has been completed.' in old_view.data)
self.assert200(self.client.get(self.build_detail_view(assign, old_stage=self.stage2)))
self.assert404(self.client.get(self.build_detail_view(assign, old_stage=self.stage3)))
def test_conductor_flow_switching(self):
assign = self.assign_contract()
self.client.get(self.detail_view.format(assign.id, assign.get_current_stage().id) + '/transition')
# we should have three actions -- entered, exited, entered
self.assertEquals(ContractStageActionItem.query.count(), 3)
self.client.get(self.detail_view.format(assign.id, self.stage2.id) +
'/flow-switch/{}'.format(self.flow2.id))
# assert that we have been updated appropriately
self.assertEquals(assign.flow_id, self.flow2.id)
self.assertEquals(assign.current_stage_id, self.flow2.stage_order[0])
# assert that the action log has been properly cleaned
new_actions = ContractStageActionItem.query.all()
self.assertEquals(len(new_actions), 2)
flow_switch_action, entered_action = 0, 0
for i in new_actions:
if i.action_type == 'entered':
entered_action += 1
elif i.action_type == 'flow_switch':
flow_switch_action += 1
self.assertEquals(entered_action, 1)
self.assertEquals(flow_switch_action, 1)
# assert that the old contract stages from the previous flow
# have had their enter/exit times cleared
old_stages = ContractStage.query.filter(
ContractStage.flow_id == self.flow.id,
ContractStage.contract_id == assign.id
).all()
for i in old_stages:
self.assertTrue(i.entered is None)
self.assertTrue(i.exited is None)
# assert that you can transition back to the original flow
current_stage = ContractStage.query.filter(
ContractStage.stage_id == assign.current_stage_id,
ContractStage.contract_id == assign.id,
ContractStage.flow_id == assign.flow_id
).first()
# switch back to the first stage
self.client.get(
self.detail_view.format(assign.id, current_stage.id) +
'/flow-switch/{}'.format(self.flow.id)
)
# assert that our contract properties work as expected
self.assertEquals(assign.flow_id, self.flow.id)
self.assertEquals(assign.current_stage_id, self.flow.stage_order[0])
# assert that the actions were logged correctly
new_actions = ContractStageActionItem.query.all()
self.assertEquals(len(new_actions), 3)
flow_switch_action, entered_action, restarted_action = 0, 0, 0
for i in new_actions:
if i.action_type == 'entered':
entered_action += 1
elif i.action_type == 'flow_switch':
flow_switch_action += 1
elif i.action_type == 'restarted':
restarted_action += 1
self.assertEquals(entered_action, 1)
self.assertEquals(flow_switch_action, 2)
self.assertEquals(restarted_action, 0)
@patch('urllib2.urlopen')
def test_url_validation(self, urlopen):
mock_open = Mock()
mock_open.getcode.side_effect = [
200,
urllib2.HTTPError('', 404, 'broken', {}, file),
urllib2.URLError('')
]
urlopen.return_value = mock_open
post_url = '/conductor/contract/{}/edit/url-exists'.format(self.contract1.id)
post1 = self.client.post(
post_url, data=json.dumps(dict(no_url='')),
headers={'Content-Type': 'application/json;charset=UTF-8'}
)
self.assertEquals(json.loads(post1.data).get('status'), 404)
post2 = self.client.post(
post_url, data=json.dumps(dict(url='works')),
headers={'Content-Type': 'application/json;charset=UTF-8'}
)
self.assertEquals(json.loads(post2.data).get('status'), 200)
post3 = self.client.post(
post_url, data=json.dumps(dict(url='doesnotwork')),
headers={'Content-Type': 'application/json;charset=UTF-8'}
)
self.assertEquals(json.loads(post3.data).get('status'), 404)
post4 = self.client.post(
post_url, data=json.dumps(dict(url='doesnotwork')),
headers={'Content-Type': 'application/json;charset=UTF-8'}
)
self.assertEquals(json.loads(post4.data).get('status'), 500)
def test_conductor_contract_post_note(self):
assign = self.assign_contract()
self.assertEquals(ContractStageActionItem.query.count(), 1)
detail_view_url = self.build_detail_view(assign)
self.client.post(detail_view_url + '?form=activity', data=dict(
note='a test note!'
))
self.assertEquals(ContractStageActionItem.query.count(), 2)
detail_view = self.client.get(detail_view_url)
self.assertEquals(len(self.get_context_variable('actions')), 2)
self.assertTrue('a test note!' in detail_view.data)
# make sure you can't post notes to an unstarted stage
self.assert404(self.client.post(
self.build_detail_view(assign, old_stage=self.stage3) + '?form=activity',
data=dict(note='a test note!')
))
# make sure you can't post a note to an unstarted contract
self.assert404(self.client.post(
self.build_detail_view(self.contract2) + '?form=activity',
data=dict(note='a test note!')
))
def test_delete_note(self):
assign = self.assign_contract()
self.assertEquals(ContractStageActionItem.query.count(), 1)
detail_view_url = self.build_detail_view(assign)
self.client.post(detail_view_url + '?form=activity', data=dict(
note='a test note!'
))
self.client.post(detail_view_url + '?form=activity', data=dict(
note='a second test note!'
))
first_note = ContractStageActionItem.query.filter(
ContractStageActionItem.action_type == 'activity'
).first()
self.assertEquals(ContractStageActionItem.query.count(), 3)
self.client.get('/conductor/contract/1/stage/1/note/{}/delete'.format(first_note.id))
self.assertEquals(ContractStageActionItem.query.count(), 2)
self.client.get('/conductor/contract/1/stage/1/note/100/delete')
self.assert_flashes("That note doesn't exist!", 'alert-warning')
self.logout_user()
# make sure you can't delete notes randomly
self.assert200(
self.client.get('/conductor/contract/1/stage/1/note/1/delete', follow_redirects=True)
)
self.assertEquals(ContractStageActionItem.query.count(), 2)
self.assert_template_used('public/home.html')
def test_conductor_stage_default_message(self):
assign = self.assign_contract()
self.assertEquals(ContractStageActionItem.query.count(), 1)
detail_view_url = self.build_detail_view(assign)
request = self.client.get(detail_view_url)
self.assertTrue('i am a default message' in request.data)
def test_conductor_send_update(self):
assign = self.assign_contract()
self.assertEquals(ContractStageActionItem.query.count(), 1)
detail_view_url = self.build_detail_view(assign)
# make sure the form validators work
bad_post = self.client.post(detail_view_url + '?form=update', data=dict(
send_to='bademail', subject='test', body='test'
), follow_redirects=True)
self.assertEquals(ContractStageActionItem.query.count(), 1)
self.assertEquals(bad_post.status_code, 200)
self.assertTrue('One of the supplied emails is invalid' in bad_post.data)
with mail.record_messages() as outbox:
good_post = self.client.post(detail_view_url + '?form=update', data=dict(
send_to='[email protected]; [email protected]', subject='test', body='test',
send_to_cc='[email protected]'
), follow_redirects=True)
self.assertEquals(len(outbox), 1)
self.assertEquals(ContractStageActionItem.query.count(), 2)
self.assertTrue('test' in outbox[0].subject)
self.assertTrue('with the subject' in good_post.data)
self.assertTrue(len(outbox[0].cc), 1)
self.assertTrue(len(outbox[0].recipients), 2)
good_post_ccs = self.client.post(detail_view_url + '?form=update', data=dict(
send_to='[email protected]', subject='test', body='test',
send_to_cc='[email protected]; [email protected]'
), follow_redirects=True)
self.assertEquals(len(outbox), 2)
self.assertEquals(ContractStageActionItem.query.count(), 3)
self.assertTrue('test' in outbox[1].subject)
self.assertTrue('with the subject' in good_post_ccs.data)
self.assertTrue(len(outbox[1].cc), 2)
self.assertTrue(len(outbox[1].recipients), 1)
def test_conductor_post_to_beacon(self):
assign = self.assign_contract()
detail_view_url = self.build_detail_view(assign)
old_view = self.client.get(detail_view_url)
self.assertTrue(self.department.name in old_view.data)
self.client.post(detail_view_url + '?form=post', data={
'contact_email': self.conductor.email, 'title': 'foobar', 'description': 'barbaz',
'planned_publish': datetime.date.today() + datetime.timedelta(1),
'planned_submission_start': datetime.date.today() + datetime.timedelta(2),
'planned_submission_end': datetime.datetime.today() + datetime.timedelta(days=2),
'department': self.department.id,
'subcategories-{}'.format(self.category.id): 'on',
'opportunity_type': self.county_type.id
})
self.assertEquals(Opportunity.query.count(), 1)
self.assertEquals(ContractStageActionItem.query.count(), 2)
detail_view = self.client.get(detail_view_url)
self.assertEquals(len(self.get_context_variable('actions')), 2)
self.assertTrue('barbaz' in detail_view.data)
def test_edit_contract_metadata(self):
assign = self.assign_contract()
detail_view_url = self.build_detail_view(assign, self.stage1)
self.client.post(detail_view_url + '?form=update-metadata', data=dict(
financial_id=999
))
self.assertEquals(ContractStageActionItem.query.count(), 2)
for i in ContractStageActionItem.query.all():
self.assertTrue(i.action_detail is not None)
self.assertEquals(assign.financial_id, '999')
def test_edit_contract_complete(self):
assign = self.assign_contract(flow=self.simple_flow)
should_redir = self.client.get('/conductor/contract/{}/edit/contract'.format(assign.id))
self.assertEquals(should_redir.status_code, 302)
self.assertEquals(
should_redir.location,
'http://localhost/conductor/contract/{}'.format(assign.id)
)
should_redir = self.client.get('/conductor/contract/{}/edit/company'.format(assign.id))
self.assertEquals(should_redir.status_code, 302)
self.assertEquals(
should_redir.location,
'http://localhost/conductor/contract/{}/edit/contract'.format(assign.id)
)
should_redir = self.client.get('/conductor/contract/{}/edit/contacts'.format(assign.id))
self.assertEquals(should_redir.status_code, 302)
self.assertEquals(
should_redir.location,
'http://localhost/conductor/contract/{}/edit/contract'.format(assign.id)
)
def test_contract_completion_session_set(self):
with self.client as c:
assign = self.assign_contract(flow=self.simple_flow)
transition_url = self.build_detail_view(assign) + '/transition'
self.client.get(transition_url)
self.assertTrue(assign.completed_last_stage())
self.assert200(c.get('/conductor/contract/{}/edit/contract'.format(assign.id)))
c.post('conductor/contract/{}/edit/contract'.format(assign.id), data=dict(
expiration_date=datetime.date(2020, 1, 1), spec_number='abcd',
description='foo'
))
self.assertTrue(session['contract-{}'.format(assign.id)] is not None)
self.assert200(c.get('/conductor/contract/{}/edit/company'.format(assign.id)))
c.post('conductor/contract/{}/edit/company'.format(assign.id), data=ImmutableMultiDict([
('companies-0-new_company_controller_number', u'1234'),
('companies-0-company_name', u'__None'),
('companies-0-controller_number', u''),
('companies-0-new_company_name', u'test')
]))
self.assertTrue(session['companies-{}'.format(assign.id)] is not None)
self.assert200(c.get('/conductor/contract/{}/edit/contacts'.format(assign.id)))
def test_edit_contract_form_validators(self):
with self.client as c:
assign = self.assign_contract(flow=self.simple_flow)
transition_url = self.build_detail_view(assign) + '/transition'
self.client.get(transition_url)
# set contract session variable so we can post to the company endpoint
c.post('conductor/contract/{}/edit/contract'.format(assign.id), data=dict(
expiration_date=datetime.date(2020, 1, 1), spec_number='abcd',
description='foo'
))
self.assertTrue('companies-{}'.format(assign.id) not in session.keys())
# assert you can't set both controller numbers
c.post('conductor/contract/{}/edit/company'.format(assign.id), data=ImmutableMultiDict([
('companies-0-new_company_controller_number', u'1234'),
('companies-0-company_name', u'__None'),
('companies-0-controller_number', u'1234'),
('companies-0-new_company_name', u'')
]))
self.assertTrue('companies-{}'.format(assign.id) not in session.keys())
# assert you can't set both company names
c.post('conductor/contract/{}/edit/company'.format(assign.id), data=ImmutableMultiDict([
('companies-0-new_company_controller_number', u''),
('companies-0-company_name', u'foobar'),
('companies-0-controller_number', u''),
('companies-0-new_company_name', u'foobar')
]))
self.assertTrue('companies-{}'.format(assign.id) not in session.keys())
# assert you can't set mismatched names/numbers
c.post('conductor/contract/{}/edit/company'.format(assign.id), data=ImmutableMultiDict([
('companies-0-new_company_controller_number', u''),
('companies-0-company_name', u''),
('companies-0-controller_number', u'1234'),
('companies-0-new_company_name', u'foobar')
]))
self.assertTrue('companies-{}'.format(assign.id) not in session.keys())
# assert new works
c.post('conductor/contract/{}/edit/company'.format(assign.id), data=ImmutableMultiDict([
('companies-0-new_company_controller_number', u'1234'),
('companies-0-company_name', u''),
('companies-0-controller_number', u''),
('companies-0-new_company_name', u'foobar')
]))
self.assertTrue(session['companies-{}'.format(assign.id)] is not None)
session.pop('companies-{}'.format(assign.id))
# assert old works
c.post('conductor/contract/{}/edit/company'.format(assign.id), data=ImmutableMultiDict([
('companies-0-new_company_controller_number', u''),
('companies-0-company_name', u'foobar'),
('companies-0-controller_number', u'1234'),
('companies-0-new_company_name', u'')
]))
self.assertTrue(session['companies-{}'.format(assign.id)] is not None)
session.pop('companies-{}'.format(assign.id))
# assert multiple companies work
c.post('conductor/contract/{}/edit/company'.format(assign.id), data=ImmutableMultiDict([
('companies-0-new_company_controller_number', u''),
('companies-0-company_name', u'foobar'),
('companies-0-controller_number', u'1234'),
('companies-0-new_company_name', u''),
('companies-1-new_company_controller_number', u'1234'),
('companies-1-company_name', u''),
('companies-1-controller_number', u''),
('companies-1-new_company_name', u'foobar2')
]))
self.assertTrue(session['companies-{}'.format(assign.id)] is not None)
session.pop('companies-{}'.format(assign.id))
def test_actual_contract_completion(self):
with self.client as c:
self.assertTrue(self.contract1.is_visible)
assign = self.assign_contract(flow=self.simple_flow)
transition_url = self.build_detail_view(assign) + '/transition'
self.client.get(transition_url)
c.post('conductor/contract/{}/edit/contract'.format(assign.id), data=dict(
expiration_date=datetime.date(2020, 1, 1), spec_number='abcd',
description='foo'
))
c.post('conductor/contract/{}/edit/company'.format(assign.id), data=ImmutableMultiDict([
('companies-0-new_company_controller_number', u'1234'),
('companies-0-company_name', u''),
('companies-0-controller_number', u''),
('companies-0-new_company_name', u'foobar')
]))
c.post('/conductor/contract/{}/edit/contacts'.format(assign.id), data=ImmutableMultiDict([
('companies-0-contacts-0-first_name', 'foo'),
('companies-0-contacts-0-last_name', 'bar'),
('companies-0-contacts-0-phone_number', '123-456-7890'),
('companies-0-contacts-0-email', '[email protected]'),
]))
self.assertTrue(assign.parent.is_archived)
self.assertFalse(assign.parent.is_visible)
self.assertTrue(assign.is_visible)
self.assertEquals(ContractBase.query.count(), 3)
self.assertEquals(assign.description, 'foo')
self.assertEquals(assign.parent.description, 'scuba supplies [Archived]')
def test_actual_contract_completion_multi_company(self):
with self.client as c:
self.assertTrue(self.contract1.is_visible)
assign = self.assign_contract(flow=self.simple_flow)
transition_url = self.build_detail_view(assign) + '/transition'
self.client.get(transition_url)
c.post('conductor/contract/{}/edit/contract'.format(assign.id), data=dict(
expiration_date=datetime.date(2020, 1, 1), spec_number='abcd',
description='foo'
))
c.post('conductor/contract/{}/edit/company'.format(assign.id), data=ImmutableMultiDict([
('companies-0-new_company_controller_number', u'1234'),
('companies-0-company_name', u''),
('companies-0-controller_number', u''),
('companies-0-new_company_name', u'foobar'),
('companies-2-new_company_controller_number', u'5678'),
('companies-2-company_name', u''),
('companies-2-controller_number', u''),
('companies-2-new_company_name', u'foobar3'),
('companies-1-new_company_controller_number', u'1234'),
('companies-1-company_name', u''),
('companies-1-controller_number', u''),
('companies-1-new_company_name', u'foobar2'),
]))
c.post('/conductor/contract/{}/edit/contacts'.format(assign.id), data=ImmutableMultiDict([
('companies-0-contacts-0-first_name', 'foo'),
('companies-0-contacts-0-last_name', 'bar'),
('companies-0-contacts-0-phone_number', '123-456-7890'),
('companies-0-contacts-0-email', '[email protected]'),
('companies-1-contacts-0-first_name', 'foo'),
('companies-1-contacts-0-last_name', 'bar'),
('companies-1-contacts-0-phone_number', '123-456-7890'),
('companies-1-contacts-0-email', '[email protected]'),
('companies-2-contacts-0-first_name', 'foo'),
('companies-2-contacts-0-last_name', 'bar'),
('companies-2-contacts-0-phone_number', '123-456-7890'),
('companies-2-contacts-0-email', '[email protected]'),
]))
# we should create two new contract objects
self.assertEquals(ContractBase.query.count(), 4)
self.assertTrue(assign.parent.is_archived)
self.assertFalse(assign.parent.is_visible)
# two of the contracts should be children of our parent contract
children = assign.parent.children
self.assertEquals(len(children), 2)
for child in children:
self.assertTrue(child.is_visible)
self.assertEquals(child.description, 'foo')
self.assertEquals(child.parent.description, 'scuba supplies [Archived]')
self.assertEquals(assign.assigned, child.assigned)
if child.financial_id == 1234:
self.assertEquals(len(child.companies), 2)
def test_contract_extension(self):
assign = self.assign_contract()
detail_view_url = self.build_detail_view(assign)
extend = self.client.get(detail_view_url + '/extend')
self.assertEquals(extend.status_code, 302)
self.assertEquals(
extend.location,
'http://localhost/conductor/contract/{}/edit/contract'.format(assign.parent.id)
)
extend_post = self.client.post('conductor/contract/{}/edit/contract'.format(assign.parent.id), data=dict(
expiration_date=datetime.date.today(), spec_number='1234',
description=assign.parent.description
))
self.assertEquals(extend_post.status_code, 302)
self.assertEquals(
extend_post.location,
'http://localhost/conductor/'
)
self.assertEquals(assign.parent.expiration_date, datetime.date.today())
# our child contract should be untouched
self.assertEquals(assign.current_stage_id, self.flow.stage_order[0])
self.assertTrue(assign.parent.is_visible)
self.assertFalse(assign.is_visible)
| bsd-3-clause | -1,775,423,843,746,313,700 | 44.494857 | 121 | 0.626382 | false | 3.856243 | true | false | false |
jgust/SublimeCscope | sublime_cscope/event_listener.py | 1 | 3885 |
import sublime
import sublime_plugin
from ..SublimeCscope import DEBUG
from . import indexer
# These commands should trigger a state change event in the indexer
PROJECT_COMMANDS = ('prompt_add_folder',
'prompt_open_project_or_workspace',
'prompt_switch_project_or_workspace',
'prompt_select_workspace',
'open_recent_project_or_workspace')
class EventListener(sublime_plugin.EventListener):
"""Monitors events from the editor and tries to figure out
when it is meaningful to notify the indexers"""
def __init__(self):
super().__init__()
self._curr_active_window = 0
self._project_command_in_progres = []
self._last_saved_buffer = None
self._last_closed_buffer = None
def _check_active_window(self):
curr_active_window = sublime.active_window().id()
# don't notify any change the first time
if self._curr_active_window == 0:
self._curr_active_window = curr_active_window
return False
prev_active_window = self._curr_active_window
self._curr_active_window = curr_active_window
#A change in active window can mean that a new window was created,
#a window was closed or the user switched between windows.
if prev_active_window != curr_active_window:
return True
return False
def _clear_last_saved_buffer(self):
self._last_saved_buffer = None
def _clear_last_closed_buffer(self):
self._last_closed_buffer = None
def _find_open_file(self, file_name):
for win in sublime.windows():
if win.find_open_file(file_name):
return True
return False
def on_post_save(self, view):
self._check_active_window()
file_name = view.file_name()
if not view.is_scratch() and file_name:
# ignore multiple calls for the same buffer for 1 second.
if file_name != self._last_saved_buffer:
self._last_saved_buffer = file_name
indexer.buffer_promoted(file_name)
sublime.set_timeout_async(self._clear_last_saved_buffer, 1000)
def on_close(self, view):
self._check_active_window()
file_name = view.file_name()
if not view.is_scratch() and file_name:
# only send buffer demoted if all views into the buffer have been
# closed.
if file_name != self._last_closed_buffer and not self._find_open_file(file_name):
self._last_closed_buffer = file_name
indexer.buffer_demoted(file_name)
sublime.set_timeout_async(self._clear_last_closed_buffer, 1000)
def on_activated(self, view):
focus_changed = self._check_active_window()
window_id = view.window().id() if view.window() else 0
proj_command_complete = False
if window_id in self._project_command_in_progres:
proj_command_complete = True
self._project_command_in_progres.remove(window_id)
if window_id and (focus_changed or proj_command_complete):
indexer.window_state_changed()
def on_window_command(self, win, cmd_name, args):
self._check_active_window()
if not win.id():
return
# if DEBUG:
# print("Got window command: %s" % cmd_name)
if cmd_name in PROJECT_COMMANDS:
if win.id() not in self._project_command_in_progres:
self._project_command_in_progres.append(win.id())
else:
print("Got command %s from win: %d while other already in progress")
elif cmd_name == 'refresh_folder_list':
indexer.refresh(win)
elif cmd_name == 'remove_folder':
indexer.window_state_changed()
| mit | -3,228,152,396,314,666,000 | 31.923729 | 93 | 0.599743 | false | 4.038462 | false | false | false |
Azure/azure-sdk-for-python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_03_01/operations/_cloud_service_roles_operations.py | 1 | 8658 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class CloudServiceRolesOperations(object):
"""CloudServiceRolesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
role_name, # type: str
resource_group_name, # type: str
cloud_service_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.CloudServiceRole"
"""Gets a role from a cloud service.
:param role_name: Name of the role.
:type role_name: str
:param resource_group_name:
:type resource_group_name: str
:param cloud_service_name:
:type cloud_service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CloudServiceRole, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_03_01.models.CloudServiceRole
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CloudServiceRole"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'roleName': self._serialize.url("role_name", role_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'cloudServiceName': self._serialize.url("cloud_service_name", cloud_service_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CloudServiceRole', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roles/{roleName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
cloud_service_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.CloudServiceRoleListResult"]
"""Gets a list of all roles in a cloud service. Use nextLink property in the response to get the
next page of roles. Do this till nextLink is null to fetch all the roles.
:param resource_group_name:
:type resource_group_name: str
:param cloud_service_name:
:type cloud_service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CloudServiceRoleListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_03_01.models.CloudServiceRoleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CloudServiceRoleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'cloudServiceName': self._serialize.url("cloud_service_name", cloud_service_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('CloudServiceRoleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roles'} # type: ignore
| mit | -5,821,989,288,854,121,000 | 45.8 | 190 | 0.639524 | false | 4.388241 | true | false | false |
lach76/scancode-toolkit | src/scancode/api.py | 2 | 6971 | #
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import print_function, absolute_import
from collections import OrderedDict
"""
Main scanning functions.
Note: this API is unstable and still evolving.
"""
def extract_archives(location, recurse=True):
"""
Extract recursively any archives found at location and yield an iterable of
ExtractEvents.
If verbose is False, only the "done" event is returned at extraction
completion.
If verbose is True, both "start" and "done" events are returned.
"""
from extractcode.extract import extract
from extractcode import default_kinds
for xevent in extract(location, kinds=default_kinds, recurse=recurse):
yield xevent
def get_copyrights(location):
"""
Yield an iterable of dictionaries of copyright data detected in the file at
location. Each item contains a list of copyright statements and a start and
end line.
"""
from cluecode.copyrights import detect_copyrights
for copyrights, authors, _years, holders, start_line, end_line in detect_copyrights(location):
if not copyrights:
continue
result = OrderedDict()
# FIXME: we should call this copyright instead, and yield one item per statement
result['statements'] = copyrights
result['holders'] = holders
result['authors'] = authors
result['start_line'] = start_line
result['end_line'] = end_line
yield result
def get_emails(location):
"""
Yield an iterable of dictionaries of emails detected in the file at
location.
"""
from cluecode.finder import find_emails
for email, line_num in find_emails(location):
if not email:
continue
misc = OrderedDict()
misc['email'] = email
misc['start_line'] = line_num
misc['end_line'] = line_num
yield misc
def get_urls(location):
"""
Yield an iterable of dictionaries of urls detected in the file at
location.
"""
from cluecode.finder import find_urls
for urls, line_num in find_urls(location):
if not urls:
continue
misc = OrderedDict()
misc['url'] = urls
misc['start_line'] = line_num
misc['end_line'] = line_num
yield misc
DEJACODE_LICENSE_URL = 'https://enterprise.dejacode.com/license_library/Demo/{}/'
def get_licenses(location, minimum_score=100):
"""
Yield an iterable of dictionaries of license data detected in the file at
location for each detected license.
minimum_score is the minimum score threshold from 0 to 100. The default is
100 means only exact licenses will be detected. With any value below 100,
approximate license results are included. Note that the minimum length for
an approximate match is four words.
"""
from licensedcode.models import get_license
from licensedcode.detect import get_license_matches
for match in get_license_matches(location, minimum_score=minimum_score):
for license_key in match.rule.licenses:
lic = get_license(license_key)
result = OrderedDict()
result['key'] = lic.key
result['score'] = match.score
result['short_name'] = lic.short_name
result['category'] = lic.category
result['owner'] = lic.owner
result['homepage_url'] = lic.homepage_url
result['text_url'] = lic.text_urls[0] if lic.text_urls else ''
result['dejacode_url'] = DEJACODE_LICENSE_URL.format(lic.key)
result['spdx_license_key'] = lic.spdx_license_key
result['spdx_url'] = lic.spdx_url
result['start_line'] = match.query_position.start_line
result['end_line'] = match.query_position.end_line
yield result
def get_file_infos(location):
"""
Return a list of dictionaries of informations collected from the file or
directory at location.
"""
from commoncode import fileutils
from commoncode import filetype
from commoncode.hash import sha1, md5
from typecode import contenttype
T = contenttype.get_type(location)
is_file = T.is_file
is_dir = T.is_dir
infos = OrderedDict()
infos['type'] = filetype.get_type(location, short=False)
infos['name'] = fileutils.file_name(location)
infos['extension'] = is_file and fileutils.file_extension(location) or ''
infos['date'] = is_file and filetype.get_last_modified_date(location) or None
infos['size'] = T.size
infos['sha1'] = is_file and sha1(location) or None
infos['md5'] = is_file and md5(location) or None
infos['files_count'] = is_dir and filetype.get_file_count(location) or None
infos['mime_type'] = is_file and T.mimetype_file or None
infos['file_type'] = is_file and T.filetype_file or None
infos['programming_language'] = is_file and T.programming_language or None
infos['is_binary'] = is_file and T.is_binary or None
infos['is_text'] = is_file and T.is_text or None
infos['is_archive'] = is_file and T.is_archive or None
infos['is_media'] = is_file and T.is_media or None
infos['is_source'] = is_file and T.is_source or None
infos['is_script'] = is_file and T.is_script or None
return [infos]
def get_package_infos(location):
"""
Return a list of dictionaries of package information
collected from the location or an empty list.
"""
from packagedcode.recognize import recognize_packaged_archives
package = recognize_packaged_archives(location)
if not package:
return []
return [package.as_dict(simple=True)]
| apache-2.0 | 8,704,822,094,559,690,000 | 37.302198 | 98 | 0.683259 | false | 4.001722 | false | false | false |
ellezv/data_structures | src/dll.py | 1 | 2544 | """An implementation of a doubly linked list in Python."""
class Node():
"""Instantiate a node."""
def __init__(self, value=None, nxt=None, previous=None):
"""."""
self.value = value
self.next = nxt
self.previous = previous
class DbLinkedList():
"""Instantiate a doubly linked list."""
def __init__(self, value=None):
"""."""
self.head = None
self.tail = None
self.length = 0
if value:
self.push(value)
def push(self, value=None):
"""Push value to the head of dll."""
new_node = Node(value, nxt=self.head)
if self.length < 1:
self.tail = new_node
else:
self.head.previous = new_node
self.head = new_node
self.length += 1
def append(self, value):
"""Append value to the tail of dll."""
new_node = Node(value, None, self.tail)
if self.length < 1:
self.head = new_node
else:
self.tail.next = new_node
self.tail = new_node
self.length += 1
def pop(self):
"""Pop first value off of the head of dll."""
if self.head:
returned_value = self.head.value
self.head = self.head.next
self.head.previous = None
self.length -= 1
return returned_value
raise ValueError("Cannot pop from an empty list")
def shift(self):
"""Remove and return the last value of the dll."""
if self.head:
returned_value = self.tail.value
self.tail = self.tail.previous
self.tail.next = None
self.length -= 1
return returned_value
raise ValueError("Cannot shift from an empty list")
def remove(self, value):
"""Remove the value from the dll."""
curr_node = self.head
if not self.length:
raise ValueError("Cannot remove from an empty list")
else:
if curr_node.value == value:
self.pop()
else:
while curr_node is not None:
if curr_node.value == value:
curr_node.previous.next = curr_node.next
curr_node.next.previous = curr_node.previous
print("{} was removed".format(value))
return
else:
curr_node = curr_node.next
raise ValueError("{} not in the list".format(value))
| mit | 3,400,972,026,953,004,500 | 30.02439 | 68 | 0.512579 | false | 4.393782 | false | false | false |
jeske/csla | pysrc/clearsilver/odb_mysql.py | 1 | 1981 | #! /usr/bin/env python
"""
usage: %(progname)s [args]
"""
import os, sys, string, time, getopt
from log import *
import odb
import MySQLdb
class Cursor(odb.Cursor):
def insert_id(self, tablename, colname):
return self.cursor.insert_id()
class Connection(odb.Connection):
def __init__(self, host, user, passwd, db):
odb.Connection.__init__(self)
self._conn = MySQLdb.connect(host=host, user=user, passwd=passwd, db=db)
self.SQLError = MySQLdb.Error
def getConnType(self): return "mysql"
def cursor(self):
return Cursor(self._conn.cursor())
def escape(self,str):
if str is None: return None
return MySQLdb.escape_string(str)
def listTables(self, cursor):
cursor.execute("show tables")
rows = cursor.fetchall()
tables = []
for row in rows:
tables.append(row[0])
return tables
def listIndices(self, tableName, cursor):
cursor.execute("show index from %s" % tableName)
rows = cursor.fetchall()
tables = map(lambda row: row[2], rows)
return tables
def listFieldsDict(self, table_name, cursor):
sql = "show columns from %s" % table_name
cursor.execute(sql)
rows = cursor.fetchall()
columns = {}
for row in rows:
colname = row[0]
columns[colname] = row
return columns
def alterTableToMatch(self, table, cursor):
invalidAppCols, invalidDBCols = table.checkTable()
if not invalidAppCols: return
defs = []
for colname in invalidAppCols.keys():
col = table.getColumnDef(colname)
colname = col[0]
coltype = col[1]
options = col[2]
defs.append(table._colTypeToSQLType(colname, coltype, options))
defs = string.join(defs, ", ")
sql = "alter table %s add column " % table.getTableName()
sql = sql + "(" + defs + ")"
print sql
cursor.execute(sql)
def createTable(self, sql, cursor):
sql = sql + " TYPE=INNODB"
return sql
def supportsTriggers(self): return False
| bsd-2-clause | -8,139,343,470,660,586,000 | 22.034884 | 76 | 0.644119 | false | 3.5375 | false | false | false |
yk-tanigawa/2015gci | webdata/webdata1.py | 1 | 1436 | #!/usr/bin/env python
# coding: utf-8
###################################################################
# カーリルAPIを用いて,東大図書館の蔵書の貸出可否を調べるスクリプト
# 2015.05.25 yk_tani
###################################################################
import urllib2, sys, json
try: isbn = sys.argv[1] # specify ISBN in commandline argument
except: isbn = '9784873112107' # defalut value
def pp(obj): # unicode を含む dictionary を eval して表示する関数
# adapted from http://taichino.com/programming/1599
# https://github.com/taichino/prettyprint
if isinstance(obj, list) or isinstance(obj, dict):
orig = json.dumps(obj, indent=4)
print eval("u'''%s'''" % orig).encode('utf-8')
else:
print obj
appkey = 'gci2015'
systemid = 'Univ_Tokyo' # 東大図書館をデフォルトで指定
# apiからデータを取る
resp = urllib2.urlopen('http://api.calil.jp/check?appkey={%s}&isbn=4834000826&systemid=%s&format=json'%(appkey, systemid)).read()
# validなjsonに整える
resp = resp.replace('callback(', '', 1).replace(');', '' ,1)
# dictionaryに変換
data = json.loads(resp)
for b in data["books"]: # 所蔵している図書室と,貸出可能かを表示
#print type(data["books"][b][systemid]['libkey'])
#print data["books"][b][systemid]['libkey']
pp( data["books"][b][systemid]['libkey'] )
| mit | -1,285,989,182,587,320,000 | 31.102564 | 129 | 0.586262 | false | 2.489066 | false | false | false |
agravier/pycogmo | common/pynn_utils.py | 1 | 34258 | #!/usr/bin/env python2
# Copyright 2011, 2012 Alexandre Gravier (al.gravier@gmail)
# This file is part of PyCogMo.
# PyCogMo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# PyCogMo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with PyCogMo. If not, see <http://www.gnu.org/licenses/>.
""" Functions and classes wrapping or complementing PyNN
functionality.
"""
import csv
import itertools
import functools
from math import isnan, ceil
import magic
import math
import numpy
import operator
from PIL import Image
import pyNN.brian as pynnn
import SimPy.Simulation as sim
import types
from utils import LOGGER, is_square, splice
class InvalidFileFormatError(Exception):
def __init__(self, mime_type, mime_subtype):
self._type = mime_type
self._subtype = mime_subtype
def __str__(self):
return "%s files of type %s are not supported." % \
(self._type, self._subtype)
class InvalidMatrixShapeError(Exception):
def __init__(self, req_dim1, req_dim2, prov_dim1, prov_dim2):
self._req = req_dim1, req_dim2
self._prov = prov_dim1, prov_dim2
def __str__(self):
return ("The required input data shape should be "
"%s,%s, but the shape of the data provided is "
"%s,%s.") % (self._req[0], self._req[1], \
self._prov[0], self._prov[1])
class SimulationError(Exception):
def __init__(self, msg):
self._msg = msg
def __str__(self):
return self._msg
def presynaptic_outputs(unit, projection, t=None):
"""Returns the vector of all firing rates of units in the
presynaptic population that are connected to the given unit. The t
parameter can be set to restrict the computation to activity
younger than t units of time. The presynaptic population must have
a registered rate encoder with records."""
pre_population = projection.pre
post_population = projection.post
if unit not in post_population:
raise SimulationError("Unit not found in post-synaptic "
"population.")
unit_index = post_population.id_to_index(unit)
renc = get_rate_encoder(pre_population)
if renc.idx < 0:
raise SimulationError(
"Cannot compute presynaptic activation because the "
"rate encoder of the presynaptic population does not "
"contain any record.")
connectivity = projection.get('weight', 'array')
connectivity_to_unit = \
[(i, not math.isnan(connectivity[i][unit_index]))
for i in xrange(len(connectivity))]
rates = numpy.array(
[renc.get_rate_for_unit_index(i, t) for i, _
in itertools.ifilter((lambda v: v[1]), connectivity_to_unit)])
return rates
class Weights(object):
"""Wraps a 2D array of floating-point numbers that has the same
dimensions as the connectivity matrix between the two populations
of neurons connected. Non-connected units i and j have
weights[i][j] == NaN. Initial weights should be input (and are
internally stored) in nA or micro-Siemens. As they need to be
normalized for the purpose of learning, max_weight needs to be
provided. It is a model-specific and should reflect the maximum
conductance of a synapse/group of synpatic connections from one
cell to the other. It is the physical value corresponding to the
normalized weight value of 1 between 2 cells.
All methods and properties return normalized weights unless
specified otherwise."""
def __init__(self, weights_array, max_weight):
self._max_weight = max_weight * 1.
self._weights = numpy.array(weights_array) / self._max_weight
self._update_shape()
# TODO: use max_weight for hard bounding here and make soft bounded
# learning functions in nettraining.
def __eq__(self, other):
internalw = None
if isinstance(other, numpy.ndarray):
internalw = other
elif isinstance(other, list):
internalw = numpy.array(other)
elif not isinstance(other, Weights):
return False
else:
if other.max_weight != self.max_weight:
return False
internalw = other.non_normalized_numpy_weights
if len(numpy.atleast_1d(self.non_normalized_numpy_weights)) != \
len(numpy.atleast_1d(internalw)):
return False
n_r = len(self.non_normalized_numpy_weights)
for i in xrange(n_r):
l = len(numpy.atleast_1d(self.non_normalized_numpy_weights[i]))
if l != len(numpy.atleast_1d(internalw[i])):
return False
for j in xrange(l):
v1 = self.non_normalized_numpy_weights[i][j]
v2 = internalw[i][j]
if (isnan(v1) and isnan(v2)):
continue
if v1 != v2:
return False
return True
def _update_shape(self):
shape = self._weights.shape
self._dim1 = shape[0]
if len(shape) > 1:
self._dim2 = shape[1]
else:
self._dim2 = 0
@property
def max_weight(self):
return self._max_weight
@property
def shape(self):
return self._dim1, self._dim2
@property
def non_normalized_weights(self):
return (self._weights * self._max_weight).tolist()
@non_normalized_weights.setter
def non_normalized_weights(self, weights_array):
if isinstance(weights_array, numpy.ndarray):
self._weights = weights_array / self._max_weight
elif isinstance(weights_array, list):
self._weights = numpy.array(weights_array) / self._max_weight
elif isinstance(weights_array, Weights):
self._weights = weights_array.normalized_numpy_weights
else:
raise TypeError("Weights can be assigned to "
"numpy.ndarray, common.pynn_utils.Weights,"
" or list types.")
self._update_shape()
@property
def flat_non_normalized_weights(self):
return list(itertools.chain.from_iterable((self._weights * self._max_weight).tolist()))
@flat_non_normalized_weights.setter
def flat_non_normalized_weights(self, w):
wr = numpy.reshape(w, self.shape)
self._weights = wr / self._max_weight
@property
def non_normalized_numpy_weights(self):
return self._weights * self._max_weight
@non_normalized_numpy_weights.setter
def non_normalized_numpy_weights(self, w):
self._weights = w / self._max_weight
self._update_shape()
@property
def normalized_numpy_weights(self):
return self._weights
@normalized_numpy_weights.setter
def normalized_numpy_weights(self, w):
self._weights = w
self._update_shape()
def __getitem__(self, i):
return self._weights[i]
def set_normalized_weight(self, i, j, w):
self._weights[i][j] = w
def set_non_normalized_weight(self, i, j, w):
self._weights[i][j] = w / self._max_weight
def _apply_binary_scalar_operator(self, operator, other):
oshape = None
r = None
try:
if isinstance(other, Weights):
if other._max_weight != self._max_weight:
ValueError("Operation not possible as operands have "
"incompatible maximum conductances.")
oshape = other.shape
r = numpy.array(self._weights)
else:
if isinstance(other, list):
oshape = numpy.shape(other)
elif not hasattr(initializer, '__getitem__'):
raise TypeError("Second operand could not be interpreted "
"as an array of weights.")
if oshape != None and oshape != self.shape:
raise IndexError
if r == None:
r = numpy.zeros(oshape)
for x in xrange(self._dim1):
for y in xrange(self._dim2):
r[x][y] = operator(self._weights[x][y], other[x][y])
else:
r -= other._weights
except IndexError:
raise ValueError("Operation not possible as operands have "
"incompatible shapes.")
w = Weights([0], max_weight=self._max_weight)
w._dim1, w._dim2 = self.shape
w._weights = r
return w
def __add__(self, other):
return self._apply_binary_scalar_operator(operator.add, other)
def __sub__(self, other):
return self._apply_binary_scalar_operator(operator.sub, other)
def __radd__(self, other):
return self._apply_binary_scalar_operator(operator.add, other)
def __rsub__(self, other):
return self._apply_binary_scalar_operator(lambda a, b: b - a, other)
def get_normalized_weights_vector(self, target_idx):
"""Returns the weights vector to unit target_idx (target unit
index in target population). NaNs (weights of connections from
non-connected units) are omitted."""
w_with_nans = [self._weights[i][target_idx]
for i in xrange(self._dim1)]
return list(itertools.ifilterfalse(math.isnan, w_with_nans))
def set_normalized_weights_vector(self, target_idx, weights):
"""Sets the weights vector to unit target_idx (target unit
index in target population). The weight vector should have as
many elements as connected units (no NaN allowed)."""
wi = 0
try:
for i in xrange(self._dim1):
if not math.isnan(self._weights[i][target_idx]):
self._weights[i][target_idx] = weights[wi]
wi += 1
except IndexError:
raise SimulationError("Dimension mismatch (not enough elements "
"to assign to weights vector).")
if wi < len(weights):
raise SimulationError("Dimension mismatch (too many elements "
"to assign to weights vector).")
def __repr__(self):
"Prints the weights, mostly for debug purposes"
old_printopt = numpy.get_printoptions()
try:
import sys
numpy.set_printoptions(threshold=sys.maxint, suppress=True)
import os
rows, columns = map(int, os.popen('stty size', 'r').read().split())
r = "Weights(weights_array= \\\n%s, max_weight=%r)" % \
(numpy.array_str(a=self._weights,
max_line_width=columns-5,
precision=2),
self._max_weight)
finally:
numpy.set_printoptions(**old_printopt)
return r
def get_weights(proj, max_weight):
"""Returns a Weights object with the values of the weights of the
projection. Use max_w to setup the maximal conductance in micro-S
or current in nA."""
return Weights(proj.getWeights(format='array'), max_weight=max_weight)
def set_weights(proj, w):
"""Sets the weights of the projection to the internal (non-normalized)
values in w."""
if isinstance(w, Weights):
proj.setWeights(w.flat_non_normalized_weights)
else:
raise TypeError("Requires an argument of class Weights.")
def read_input_data(file_path, dim1, dim2, m=None):
"""The libmagic file identifier can be passed as argument m (used for
testing)."""
if m == None:
m = magic.Magic(mime=True)
mime = m.from_file(file_path)
mime = mime.lower().split('/')
float_array = None
if mime[0] == 'image':
float_array = read_image_data(file_path)
elif mime[0] == 'text':
if mime[1] == 'plain':
float_array = read_csv_data(file_path)
else:
raise InvalidFileFormatError(mime[0], mime[1])
else:
raise InvalidFileFormatError(mime[0], mime[1])
verify_input_array(float_array, dim1, dim2)
return float_array
def read_image_data(file_path):
"""Raises IOError if the file is not an image."""
im = Image.open(file_path)
# if im.size != (dim1, dim2):
# raise InvalidMatrixShapeError((dim1, dim2), im.size)
byte_array = numpy.array(im.convert("L")) # grayscale, [0 255]
norm_array = byte_array / 255.
return norm_array
def read_csv_data(file_path):
"""Raises IOError if the file is not a CSV file."""
float_array = []
try:
with open(file_path, 'rb') as f:
row_reader = csv.reader(f)
for r in itertools.ifilter(None, row_reader):
float_array.append(map(float, r))
return numpy.array(float_array)
except ValueError as e:
raise IOError(str(e))
def verify_input_array(float_array, dim1, dim2):
d1 = len(float_array)
if d1 != dim1:
raise InvalidMatrixShapeError(dim1, dim2, d1, "unkown")
for r in float_array:
d2 = len(r)
if d2 != dim2:
raise InvalidMatrixShapeError(dim1, dim2, d1, d2)
real = numpy.isreal(r)
if not isinstance(real, bool):
real = real.all()
if not real: # row test
raise TypeError("The input array contains invalid data.")
class InputSample(object):
"""Wraps a 2D array of normalized floating-point numbers that has
the same dimensions as the InputLayer to which it is
presented. The data can be an array, or copied from an object with
[][] accessor, loaded from a file, uniformly initialized to the
same value, or initialized by a user-provided function."""
# implement an [][] accessor
def __init__(self, dim1, dim2, initializer, expand=True):
"""The initializer can be an array, an object with [][]
accessor, a file path (string), a single floating point number
within [0,1] (the array is uniformly initialized to the same
value), or a user-provided callable that takes two integers x
and y in [0, dim1[ and [0, dim2[ respectively, and returns the
value to be stored in the array at [x][y]. The optional
parameter expand affects the case where the initializer is a
callable, an object with __getitem__, or a single number. In
those case, setting expand to False prevents the
precomputation of the whole array, and the InputSample
accessor encapsulate the function call, the object accessor,
or always returns the given number. If expand is True, the
InputSample created is mutable. If expand is False, the
InputSample is immutable."""
self._array = []
self._getitem = lambda k: self._array[k]
self._setitem = self._assign_to_array
if isinstance(initializer, basestring):
try:
self._array = read_input_data(initializer, dim1, dim2)
except IOError as e:
LOGGER.error("Could not read file %s.", initializer)
raise e
elif isinstance(initializer, types.FileType):
raise TypeError("Pass a string with the filepath to the "
"InputSample initializer, instead of a "
"file descriptor.")
elif isinstance(initializer, list):
self._array = initializer
elif hasattr(initializer, '__getitem__'):
if expand:
for x in xrange(dim1):
self._array.append([])
for y in xrange(dim2):
self._array[x].append(initializer[x][y])
else:
self._array = initializer
self._setitem = self._raise_immutable
elif hasattr(initializer, '__call__'):
# to restrict to functions:
# isinstance(initializer,
# (types.FunctionType, types.BuiltinFunctionType))
if expand:
for x in xrange(dim1):
self._array.append([])
for y in xrange(dim2):
self._array[x].append(initializer(x,y))
else:
class InitCont(object):
def __init__(self, x):
self._x = x
def __getitem__(self, y):
return initializer(self._x, y)
self._getitem = lambda x: InitCont(x)
self._setitem = self._raise_immutable
self._dim1 = dim1
self._dim2 = dim2
if expand:
verify_input_array(self._array, dim1, dim2)
def _raise_immutable(self, *args):
raise TypeError("Attempted change of state on an "
"immutable InputSample (created with "
"expand=False)")
def _assign_to_array(self, k, v):
self._array[k] = v
def __getitem__(self, k):
return self._getitem(k)
def __setitem__(self, k, v):
self._setitem(k, v)
@property
def shape(self):
return self._dim1, self._dim2
class RectilinearLayerAdapter(object):
"""Base class adapting PyNN layers."""
def __init__(self, pynn_pop, dim1, dim2):
self.unit_adapters_mat = []
for x in xrange(dim1):
self.unit_adapters_mat.append([])
for y in xrange(dim2):
self.unit_adapters_mat[x].append([None,
pynn_pop[x*dim2+y]])
self._dim1 = dim1
self._dim2 = dim2
self.pynn_population = pynn_pop
@property
def shape(self):
return self._dim1, self._dim2
def __getitem__(self, i):
return self.unit_adapters_mat[i]
def get_unit(self, i, j):
return self.unit_adapters_mat[i][j][1]
INPUT_LAYER_MAX_NAMP_DEFAULT = 100
class RectilinearInputLayer(RectilinearLayerAdapter):
"""Wraps a 2D array of electrodes with the same dimensions (dim1,
dim2) as the PyNN population in which it injects current. The
stimulation scale can be adjusted by providing the max input
amplitude in nA."""
def __init__(self, pynn_pop, dim1, dim2, max_namp=INPUT_LAYER_MAX_NAMP_DEFAULT):
super(RectilinearInputLayer, self).__init__(pynn_pop, dim1, dim2)
self.input_scaling = max_namp
# DCSources have to be recreated each time.
def apply_input(self, sample, start_time, duration,
max_namp = None, dcsource_class = pynnn.DCSource):
"""Given a sample of type InputSample and of same shape as the
input layer, and a duration, creates and connects electrodes
that apply the input specified by the input sample matrix to
the input population. A max_namp value can be specified in
nanoamperes to override the max current corresponding to an
input value of 1 given at construction time. dcsource_class is
here as a primitive dependency injection facility, for
testing."""
if max_namp == None:
max_namp = self.input_scaling
# TODO: Common current source for cells what should get the
# exact same input
for x in xrange(self._dim1):
for y in xrange(self._dim2):
# Will the GC collect the electrodes? Does PyNN delete
# them after use?
self.unit_adapters_mat[x][y][0] = \
dcsource_class({"amplitude": max_namp * sample[x][y],
"start" : start_time,
"stop" : start_time+duration})
self.unit_adapters_mat[x][y][0].inject_into(
[self.unit_adapters_mat[x][y][1]])
class RectilinearOutputRateEncoder(RectilinearLayerAdapter):
"""Keeps track of the weighted averages on a sliding window of the
output rates of all units in the topographically rectilinear
population of units. The update period and can be overridden at update
time."""
# Default width of the sliding window in simulator time units. The
# weight of past rates in activity calculation decreases linearly
# so that it is 0 when window_width old, and 1 for sim.now()
DEFAULT_WINDOW_WIDTH = 100;
DEFAULT_UPDATE_PERIOD = 10
def __init__(self, pynn_pop, dim1, dim2,
update_period = DEFAULT_UPDATE_PERIOD,
window_width=DEFAULT_WINDOW_WIDTH):
super(RectilinearOutputRateEncoder, self).__init__(pynn_pop, dim1, dim2)
self.window_width = window_width
self.update_period = update_period
# the number of records needs to be one more than requested
# because we are interested in the rate of firing, which is
# the difference in total number of spikes fired between now
# and 1 update period ago. In general, we need n+1 data points
# to determine n such differences.
self.hist_len = int(ceil(self.window_width/self.update_period)) + 1
for x in xrange(self._dim1):
for y in xrange(self._dim2):
self.unit_adapters_mat[x][y][0] = \
numpy.zeros(self.hist_len, dtype=numpy.int)
self.idx = -1
self.update_history = None # initialized at first update
def extend_capacity(self, idx):
"""Adds one cell to all logging structures at position idx, and
increments self.hist_len."""
if idx == 0:
# Edge case: extension at the end of the records
idx = self.hist_len
for x in xrange(self._dim1):
for y in xrange(self._dim2):
self.unit_adapters_mat[x][y][0] = numpy.concatenate(
(self.unit_adapters_mat[x][y][0][:idx],
[-1],
self.unit_adapters_mat[x][y][0][idx:]))
self.update_history = numpy.concatenate(
(self.update_history[:idx], [-1], self.update_history[idx:]))
self.hist_len += 1
def make_hist_weights_vec(self, update_history=None, window_width=None, idx=None):
""" Returns the ndarray of weights by which to multiply the
rates history vector to calculate the weighted recent activity
of the unit. Parameters are the update times array
(update_history), the rate averaging window width
(window_width), and the current time index in the update times
array (idx). If update_history is not provided,
self.update_history is usedIf window_width is not provided,
self.window_width is used. If idx is not provided, self.idx
is used. The weight for the oldest rate is the head of the
array. The sum of weights is 1 if the update_history array
covers at least the duration of window_width."""
if update_history == None:
update_history = self.update_history
if idx == None:
idx = self.idx
if window_width == None:
window_width = self.window_width
update_hist = numpy.append(update_history[idx+1:],
update_history[:idx+1])
update_dt = numpy.diff(update_hist)
cumsum_dt = update_dt[::-1].cumsum()[::-1] # reversed cumulative sum
last_t = update_hist[-1]
cutoff_t = last_t - window_width
l_h = 1 - cumsum_dt / (window_width * 1.)
r_h = 1 - (numpy.append(cumsum_dt[1:], [0]) / (window_width * 1.))
areas = numpy.fromiter(
itertools.imap(lambda i, x:
# in window -> area; out -> 0; border -> triangle
(l_h[i] + r_h[i]) * update_dt[i] if x <= window_width
else max(abs(r_h[i]) * (update_hist[i + 1] - cutoff_t), 0),
itertools.count(), cumsum_dt),
numpy.float)
return areas / window_width
def advance_idx(self):
self.idx = self.next_idx
@property
def next_idx(self):
return self.idx_offset(1)
@property
def last_update_time(self):
if self.update_history != None:
return self.update_history[self.idx]
return None
@property
def previous_idx(self):
return self.idx_offset(-1)
def idx_offset(self, offset):
"""Returns the value of the index with the (positive or negative)
offset added."""
return (self.idx + offset) % self.hist_len
# The data structure for the rate history of one unit is a
# circular list of rates, and an integer index (self.idx, common
# to all units) pointing to the most recent record. The size of
# this list is determined in __init__ by the window_width and
# update_period. Each unit's history is kept in the
# RectilinearLayerAdapter's unit_adapters_mat[x][y][0]. There is
# an additional circular list of updates timestamps for testing.
# We assume that the necessary recorders have been set up.
def update_rates(self, t_now):
"""t_now is the timestamp for the current rates being recorded."""
if self.idx != -1:
# Not the first update, so the state is consistent.
dt = t_now - self.update_history[self.idx]
if dt < 0:
raise SimulationError("update_rates was called with a past "
"update time. Only monotonic updates "
"are supported.")
if dt == 0.:
# It's a re-update of the current record! Let's rewind history!
self.idx = self.previous_idx
elif dt < self.update_period:
# Premature update -> we may need to increase the arrays length
# to have enough place to cover the full window width.
# The total time covered by the rate log after idx increment
# will be:
total_covered_dt = t_now - \
self.update_history[self.next_idx]
if total_covered_dt < self.window_width:
# The arrays are insufficient to cover the whole window
# width. We need to extend all arrays by one (add one entry
# to all logging structures).
self.extend_capacity(self.next_idx)
else:
# First update:
# Initialize the update times log to past values to have a
# consistent state without having to wait for the whole update
# window to have been crawled once.
self.update_history = t_now - self.update_period * \
numpy.array([0] + range(self.hist_len-1, 0, -1))
self.advance_idx()
self.update_history[self.idx] = t_now
rec = self.pynn_population.get_spike_counts();
for x in xrange(self._dim1):
for y in xrange(self._dim2):
self.unit_adapters_mat[x][y][0][self.idx] = \
rec.get(self.pynn_population[x*self._dim2+y])
def get_rates(self, t=None):
"""Returns the matrix of units weighted firing rates for the
last t time units, or for the whole window width of this rate
encoder it t is not specified."""
r = numpy.zeros((self._dim1, self._dim2), dtype=numpy.float)
for x in xrange(self._dim1):
for y in xrange(self._dim2):
r[x][y] = self.get_rate(x, y, t=t)
return r
def get_rate_for_unit_index(self, unit_index, t=None):
return self.get_rate(unit_index / self._dim1,
unit_index % self._dim2,
t=t)
def get_rate(self, x, y, t=None):
return self.f_rate(self.unit_adapters_mat[x][y][0], t=t)
def f_rate(self, np_a, t=None, update_history=None):
"""Returns the weighted average of the rates recorded in the
differences of the array np_a. The t parameter can be used to
silence rate information older than t units of time, which is
necessary to select the firing rate pertaining to one event
only. If now-t does not fall on a recording boundary, the more
recent boundary is used, otherwise the rate recording may be
contaminated by spikes older than t. If that leaves no record
available (i.e. t < age of previous record), an error is
raised.
The update_history parameter overrides the rate encoder's
update history, it should only be used for testing."""
if update_history == None:
update_history = self.update_history
update_hist = numpy.append(update_history[self.idx+1:],
update_history[:self.idx+1])
cut_i = 0
if t != None:
cut_t = sim.now() - t
cut_i = numpy.searchsorted(update_hist, cut_t, side='left')
# t must not be in the last interval:
if cut_i >= len(update_hist) - 1:
raise SimulationError("The rate encoder resolution is "
"insufficient to get any rate "
"data on the requested period.")
update_hist = update_hist[cut_i:]
update_dt = numpy.diff(update_hist) * 1.
np_a = numpy.append(np_a[self.idx+1:], np_a[:self.idx+1])
np_a = np_a[cut_i:]
rates = numpy.diff(np_a)
window_width = min(sum(update_dt), self.window_width) if t!= None \
else self.window_width
return self.make_hist_weights_vec(update_history=update_hist,
window_width=window_width,
idx=len(update_hist)
).dot(rates / update_dt)
def __repr__(self):
"Returns a string representation for debugging."
old_printopt = numpy.get_printoptions()
try:
import sys
numpy.set_printoptions(threshold=sys.maxint, suppress=True)
import os
rows, columns = map(int, os.popen('stty size', 'r').read().split())
# We don't return the rates in self.unit_adapters_mat
array_str = numpy.array_str(a=self.update_history,
max_line_width=columns-26,
precision=2) \
if self.update_history != None \
else None
r = (
"RectilinearOuputRateEncoder(\n"
" self.pynn_pop = %r\n"
" self.shape = %r\n"
" self.window_width = %r\n"
" self.update_period = %r\n"
" self.hist_len = %r\n"
" self.idx = %r\n"
" self.update_history = %s\n"
")" ) % \
(self.pynn_population,
(self._dim1, self._dim2),
self.window_width,
self.update_period,
self.hist_len,
self.idx,
array_str)
finally:
numpy.set_printoptions(**old_printopt)
return r
def rectilinear_shape(population):
try:
pos = population.positions
except Exception, e:
LOGGER.warning(("Could not retrieve units positions for population "
"%s; assuming square shape."), population.label)
if not is_square(population.size):
raise TypeError(("The shape population %s is not square and could "
"neither be retreived nor guessed."), population.label)
dim1 = dim2 = int(math.sqrt(population.size))
else:
dim1 = len(set(pos[1]))
dim2 = len(set(pos[0]))
return (dim1, dim2)
# WARNING / TODO: The following function reveals a design flaw in
# pycogmo. PyNN is insufficient and its networks should be
# encapsulated along with more metadata.
def population_adpater_provider(pop_prov_dict,
provided_class,
population):
"""Factory function providing an adapter of the specified class
for the population parameter. pop_prov_dict is a dictionary taking
a (population, provided_class) tuple as key, and returning an
instance of provided_class initialized with 3 arguments: the
population, its size in the first dimension, and its size in the
second dimension."""
key = (population, provided_class)
if pop_prov_dict.has_key(key):
return pop_prov_dict[key]
else:
LOGGER.warning("No %s for population %s, creating one.",
provided_class.__name__, population.label)
dim1, dim2 = rectilinear_shape(population)
inst = provided_class(population, dim1, dim2)
return pop_prov_dict.setdefault(key, inst)
POP_ADAPT_DICT = {}
get_input_layer = functools.partial(population_adpater_provider,
POP_ADAPT_DICT,
RectilinearInputLayer)
get_input_layer.__doc__ = ("Provides a unique input layer for the "
"given population.")
get_rate_encoder = functools.partial(population_adpater_provider,
POP_ADAPT_DICT,
RectilinearOutputRateEncoder)
get_rate_encoder.__doc__ = ("Provides a unique rectilinear output rate "
"encoder for the given population.")
def enable_recording(*p):
"""Turns on spike recorders for all populations in parameter"""
for pop in p:
pop.record(to_file=False)
| gpl-3.0 | -7,213,629,115,506,757,000 | 40.175481 | 95 | 0.580769 | false | 4.113099 | false | false | false |
mtholder/phyloplumber | getting_started.py | 1 | 4446 | #!/usr/bin/env python
import sys, os, subprocess, urllib, logging
log = logging.getLogger(__name__)
major, minor = [int(i) for i in sys.version_info[:2]]
if major != 2 or minor < 4:
sys.exit("""Please install python. Versions 2.4, 2.5, 2.6, and 2.7 should all work
If you have python installed, make sure that \"python\" is on your PATH
""")
if not os.path.exists('mydevenv/bin/activate'):
if not os.path.exists('go-pylons.py'):
try:
sys.stderr.write('Downloading go-pylons.py\n')
o = open('go-pylons.py', 'w')
o.write(urllib.urlopen('http://pylonshq.com/download/1.0/go-pylons.py').read())
o.close()
except:
raise
sys.exit("""The script needs go-pylons.py but the attempt to download it using urllib failed.
Please, download the go-pylons.py script from
http://pylonshq.com/download/1.0/go-pylons.py
and then place the downloaded script in this directory.
""")
sys.stderr.write('Running go-pylons.py\n')
if subprocess.call([sys.executable, 'go-pylons.py', '--no-site-packages', 'mydevenv']) != 0:
sys.exit(1)
if not os.path.exists('phyloplumber'):
try:
result = subprocess.call(['git', 'clone', 'git://github.com/mtholder/phyloplumber.git'])
except:
sys.exit("""The attempt to pull down the latest version of phyloplumber using git failed.
If you do not have git installed, you can download it from http://git-scm.com
If you have installed git, make sure that it is on your path.""")
if result != 0:
sys.exit(1)
string_args = {'pp' : os.path.abspath(os.curdir) }
if not os.path.exists('phyloplumber_env.sh'):
sys.stdout.write("Creating phyloplumber_env.sh bash script\n")
o = open('phyloplumber_env.sh', 'w')
o.write('''#!/bin/sh
export PHYLOPLUMBER_PARENT="%(pp)s"
export PHYLOPLUMBER_ROOT=${PHYLOPLUMBER_PARENT}/phyloplumber
source ${PHYLOPLUMBER_PARENT}/mydevenv/bin/activate
''' % string_args)
o.close()
if os.path.exists('dendropy'):
string_args['dd'] = 'dendropy'
else:
string_args['dd'] = 'DendroPy'
if not os.path.exists(string_args['dd']):
try:
result = subprocess.call(['git', 'clone', 'git://github.com/jeetsukumaran/DendroPy.git'])
except:
sys.exit("""The attempt to pull down the latest version of dendropy using git failed.
If you do not have git installed, you can download it from http://git-scm.com
If you have installed git, make sure that it is on your path.""")
if result != 0:
sys.exit(1)
if sys.platform.upper().startswith('WIN'):
sys.exit("""At this point you will need to execute the "%(pp)s/mydevenv/bin/activate.bat" script, then
1. run "easy_install sphinx"
2. change the working directory to phyloplumber and run "python setup.py develop"
3. change the working directory to %(dd)s and run "python setup.py develop"
to finish the installation process.
You will need to execute the
"%(pp)s/mydevenv/bin/activate.bat"
script each time you launch the phyloplumber server.
""" % string_args)
else:
fn = 'finish_phyloplumber_installation.sh'
o = open(fn, 'w')
o.write('''#!/bin/sh
source phyloplumber_env.sh || exit 1
################################################################################
# Install sphinx to the devenv
################################################################################
easy_install sphinx
################################################################################
# Checkout dendropy and use "setup.py develop" command to install it the dev env
################################################################################
cd %(dd)s || exit 1
python setup.py develop || exit 1
cd ..
################################################################################
# install phyloplumber using the "setup.py develop" command
################################################################################
cd phyloplumber || exit 1
python setup.py develop || exit 1
cd ..
echo "phyloplumber_env.sh has been written. Whenever you want to work on phyloplumber"
echo " from the command line, then (from a bash shell) source this file to "
echo " configure your environment"
''' % string_args)
o.close()
result = subprocess.call(['/bin/sh', fn])
if result == 0:
os.remove(fn)
else:
sys.exit(1)
| gpl-3.0 | -4,168,957,450,927,577,000 | 35.442623 | 106 | 0.583446 | false | 3.623472 | false | false | false |
ZeusWPI/Haldis | app/views/general.py | 1 | 6260 | "Script to generate the general views of Haldis"
import os
from datetime import datetime, timedelta
import yaml
from typing import Optional
from flask import Flask, render_template, make_response
from flask import request, jsonify
from flask import Blueprint, abort
from flask import current_app as app
from flask import send_from_directory, url_for
from flask_login import login_required
from utils import first
from hlds.definitions import location_definitions
from hlds.models import Location
from models import Order
# import views
from views.order import get_orders
import json
from flask import jsonify
general_bp = Blueprint("general_bp", __name__)
with open(os.path.join(os.path.dirname(__file__), "themes.yml"), "r") as _stream:
_theme_data = yaml.safe_load(_stream)
THEME_OPTIONS = _theme_data["options"]
THEMES = _theme_data["themes"]
@general_bp.route("/")
def home() -> str:
"Generate the home view"
prev_day = datetime.now() - timedelta(days=1)
recently_closed = get_orders(
((Order.stoptime > prev_day) & (Order.stoptime < datetime.now()))
)
return render_template(
"home.html", orders=get_orders(), recently_closed=recently_closed
)
def is_theme_active(theme, now):
theme_type = theme["type"]
if theme_type == "static":
return True
if theme_type == "seasonal":
start_day, start_month = map(int, theme["start"].split("/"))
start_datetime = datetime(year=now.year, day=start_day, month=start_month)
end_day, end_month = map(int, theme["end"].split("/"))
end_year = now.year + (1 if start_month > end_month else 0)
end_datetime = datetime(year=end_year, day=end_day, month=end_month)
return start_datetime <= now <= end_datetime
raise Exception("Unknown theme type {}".format(theme_type))
def get_theme_css(theme, options):
# Build filename
# Each option's chosen value is appended, to get something like mytheme_darkmode_heavy.css
filename = theme["file"]
for option in theme.get("options", []):
theme_name = theme["name"]
assert option in THEME_OPTIONS, f"Theme `{theme_name}` uses undefined option `{option}`"
chosen_value = options[option]
possible_values = list(THEME_OPTIONS[option].keys())
value = chosen_value if chosen_value in possible_values \
else THEME_OPTIONS[option]["_default"]
filename += "_" + value
filename += ".css"
theme_css_dir = "static/css/themes/"
return os.path.join(app.root_path, theme_css_dir, filename)
def get_active_themes():
now = datetime.now()
return [theme for theme in THEMES if is_theme_active(theme, now)]
@general_bp.route("/theme.css")
def theme_css():
"Send appropriate CSS for current theme"
themes = get_active_themes()
theme_name = request.cookies.get("theme", None)
theme = first((t for t in themes if t["file"] == theme_name), default=themes[-1])
options = {
name: request.cookies.get("theme_" + name, None)
for name in ["atmosphere", "performance"]
}
path = get_theme_css(theme, options)
with open(path) as f:
response = make_response(f.read())
response.headers["Content-Type"] = "text/css"
return response
@general_bp.route("/current_theme.js")
def current_theme_js():
themes = get_active_themes()
selected_theme_name = request.cookies.get("theme", None)
matching_theme = first((t for t in themes if t["file"] == selected_theme_name))
cur_theme = matching_theme or themes[-1]
response = make_response(rf'''
var currentTheme = {json.dumps(cur_theme['file'])};
var currentThemeOptions = {json.dumps(cur_theme.get('options', []))};
''')
response.headers["Content-Type"] = "text/javascript"
# Theme name that is not valid at this moment: delete cookie
if matching_theme is None:
response.delete_cookie("theme", path="/")
return response
@general_bp.route("/map")
def map_view() -> str:
"Generate the map view"
return render_template("maps.html", locations=location_definitions)
@general_bp.route("/location")
def locations() -> str:
"Generate the location view"
return render_template("locations.html", locations=location_definitions)
@general_bp.route("/location/<location_id>")
def location(location_id) -> str:
"Generate the location view given an id"
loc = first(filter(lambda l: l.id == location_id, location_definitions))
if loc is None:
abort(404)
return render_template("location.html", location=loc, title=loc.name)
@general_bp.route("/location/<location_id>/<dish_id>")
def location_dish(location_id, dish_id) -> str:
loc: Optional[Location] = first(
filter(lambda l: l.id == location_id, location_definitions)
)
if loc is None:
abort(404)
dish = loc.dish_by_id(dish_id)
if dish is None:
abort(404)
return jsonify([
{
"type": c[0],
"id": c[1].id,
"name": c[1].name,
"description": c[1].description,
"options": [
{
"id": o.id,
"name": o.name,
"description": o.description,
"price": o.price,
"tags": o.tags,
}
for o in c[1].options
],
}
for c in dish.choices
])
@general_bp.route("/about/")
def about() -> str:
"Generate the about view"
return render_template("about.html")
@general_bp.route("/profile/")
@login_required
def profile() -> str:
"Generate the profile view"
return render_template("profile.html", themes_list=get_active_themes())
@general_bp.route("/favicon.ico")
def favicon() -> str:
"Generate the favicon"
# pylint: disable=R1705
if not get_orders((Order.stoptime > datetime.now())):
return send_from_directory(
os.path.join(app.root_path, "static"),
"favicon.ico",
mimetype="image/x-icon",
)
else:
return send_from_directory(
os.path.join(app.root_path, "static"),
"favicon_orange.ico",
mimetype="image/x-icon",
)
| mit | 2,933,671,513,590,761,000 | 27.715596 | 96 | 0.62476 | false | 3.662961 | false | false | false |
Havate/havate-openstack | proto-build/gui/horizon/Horizon_GUI/horizon/utils/fields.py | 5 | 5684 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.exceptions import ValidationError # noqa
from django.forms import forms
from django.forms import widgets
from django.utils.encoding import force_unicode
from django.utils.functional import Promise # noqa
from django.utils.html import conditional_escape
from django.utils.html import escape
from django.utils.translation import ugettext_lazy as _
import netaddr
import re
ip_allowed_symbols_re = re.compile(r'^[a-fA-F0-9:/\.]+$')
IPv4 = 1
IPv6 = 2
class IPField(forms.Field):
"""Form field for entering IP/range values, with validation.
Supports IPv4/IPv6 in the format:
.. xxx.xxx.xxx.xxx
.. xxx.xxx.xxx.xxx/zz
.. ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
.. ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/zz
and all compressed forms. Also the short forms
are supported:
xxx/yy
xxx.xxx/yy
.. attribute:: version
Specifies which IP version to validate,
valid values are 1 (fields.IPv4), 2 (fields.IPv6) or
both - 3 (fields.IPv4 | fields.IPv6).
Defaults to IPv4 (1)
.. attribute:: mask
Boolean flag to validate subnet masks along with IP address.
E.g: 10.0.0.1/32
.. attribute:: mask_range_from
Subnet range limitation, e.g. 16
That means the input mask will be checked to be in the range
16:max_value. Useful to limit the subnet ranges
to A/B/C-class networks.
"""
invalid_format_message = _("Incorrect format for IP address")
invalid_version_message = _("Invalid version for IP address")
invalid_mask_message = _("Invalid subnet mask")
max_v4_mask = 32
max_v6_mask = 128
def __init__(self, *args, **kwargs):
self.mask = kwargs.pop("mask", None)
self.min_mask = kwargs.pop("mask_range_from", 0)
self.version = kwargs.pop('version', IPv4)
super(IPField, self).__init__(*args, **kwargs)
def validate(self, value):
super(IPField, self).validate(value)
if not value and not self.required:
return
try:
if self.mask:
self.ip = netaddr.IPNetwork(value)
else:
self.ip = netaddr.IPAddress(value)
except Exception:
raise ValidationError(self.invalid_format_message)
if not any([self.version & IPv4 > 0 and self.ip.version == 4,
self.version & IPv6 > 0 and self.ip.version == 6]):
raise ValidationError(self.invalid_version_message)
if self.mask:
if self.ip.version == 4 and \
not self.min_mask <= self.ip.prefixlen <= self.max_v4_mask:
raise ValidationError(self.invalid_mask_message)
if self.ip.version == 6 and \
not self.min_mask <= self.ip.prefixlen <= self.max_v6_mask:
raise ValidationError(self.invalid_mask_message)
def clean(self, value):
super(IPField, self).clean(value)
return str(getattr(self, "ip", ""))
class MultiIPField(IPField):
"""Extends IPField to allow comma-separated lists of addresses."""
def validate(self, value):
self.addresses = []
if value:
addresses = value.split(',')
for ip in addresses:
super(MultiIPField, self).validate(ip)
self.addresses.append(ip)
else:
super(MultiIPField, self).validate(value)
def clean(self, value):
super(MultiIPField, self).clean(value)
return str(','.join(getattr(self, "addresses", [])))
class SelectWidget(widgets.Select):
"""Customizable select widget, that allows to render
data-xxx attributes from choices.
.. attribute:: data_attrs
Specifies object properties to serialize as
data-xxx attribute. If passed ('id', ),
this will be rendered as:
<option data-id="123">option_value</option>
where 123 is the value of choice_value.id
.. attribute:: transform
A callable used to render the display value
from the option object.
"""
def __init__(self, attrs=None, choices=(), data_attrs=(), transform=None):
self.data_attrs = data_attrs
self.transform = transform
super(SelectWidget, self).__init__(attrs, choices)
def render_option(self, selected_choices, option_value, option_label):
option_value = force_unicode(option_value)
other_html = (option_value in selected_choices) and \
u' selected="selected"' or ''
if not isinstance(option_label, (basestring, Promise)):
for data_attr in self.data_attrs:
data_value = conditional_escape(
force_unicode(getattr(option_label,
data_attr, "")))
other_html += ' data-%s="%s"' % (data_attr, data_value)
if self.transform:
option_label = self.transform(option_label)
return u'<option value="%s"%s>%s</option>' % (
escape(option_value), other_html,
conditional_escape(force_unicode(option_label)))
| apache-2.0 | -760,614,181,745,484,900 | 35.203822 | 79 | 0.622977 | false | 4.048433 | false | false | false |
bullxpfs/lustre-shine | lib/Shine/Lustre/Actions/StartTarget.py | 3 | 3994 | # StartTarget.py -- Lustre action class : start (mount) target
# Copyright (C) 2009-2013 CEA
#
# This file is part of shine
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
"""
This module contains a FSAction class to start a Lustre target.
"""
import os
from ClusterShell.Task import task_self
from Shine.Configuration.Globals import Globals
from Shine.Lustre.Actions.Action import FSAction, Result
class StartTarget(FSAction):
"""
File system target start action class.
Lustre, since 1.6, starts a target simply by mounting it.
"""
NAME = 'start'
def __init__(self, target, **kwargs):
FSAction.__init__(self, target, **kwargs)
self.mount_options = kwargs.get('mount_options')
self.mount_paths = kwargs.get('mount_paths')
def _already_done(self):
"""Return a Result object is the target is already mounted."""
# Already done?
if self.comp.is_started():
return Result("%s is already started" % self.comp.label)
# LBUG #18624
if not self.comp.dev_isblk:
task_self().set_info("fanout", 1)
return None
def _prepare_cmd(self):
"""Mount file system target."""
# If there is a user-defined path
if self.mount_paths and self.comp.TYPE in self.mount_paths:
mount_path = self.mount_paths[self.comp.TYPE]
else:
# Default mount path
mount_path = "/mnt/$fs_name/$type/$index"
# Replace variables
var_map = {'index': str(self.comp.index),
'dev' : os.path.basename(self.comp.dev)}
if self.comp.journal:
var_map['jdev'] = os.path.basename(self.comp.journal.dev)
mount_path = self._vars_substitute(mount_path, var_map)
#
# Build command
#
command = ["mkdir -p \"%s\"" % mount_path]
command += ["&& /bin/mount -t lustre"]
# Loop devices handling
if not self.comp.dev_isblk:
command.append("-o loop")
options = []
# Mount options from configuration
if self.mount_options and self.mount_options.get(self.comp.TYPE):
options += [self.mount_options.get(self.comp.TYPE)]
# Mount options from command line
if self.addopts:
options += [self.addopts]
# When device detection order is variable, jdev could have a different
# major/minor than the one it has on previous mount.
# In this case, we must be sure we use the current one to avoid error.
#
# (Note: We can use `blkid' instead of jdev and extract the current
# journal UUID if we have issue using directly jdev path.)
if self.comp.journal:
majorminor = os.stat(self.comp.journal.dev).st_rdev
options += ["journal_dev=%#x" % majorminor]
if len(options):
command.append('-o ' + ','.join(options))
command.append(self.comp.dev)
command.append(mount_path)
return command
def needed_modules(self):
if Globals().lustre_version_is_smaller('2.4') or \
not Globals().lustre_version_is_smaller('2.5'):
return ['lustre', 'ldiskfs']
else:
# lustre 2.4 needs fsfilt_ldiskfs
return ['lustre', 'fsfilt_ldiskfs']
| gpl-2.0 | -6,228,267,472,566,826,000 | 32.283333 | 78 | 0.623185 | false | 3.836695 | false | false | false |
Heathckliff/cantera | interfaces/cython/cantera/examples/thermo/rankine.py | 4 | 1770 | """
A Rankine vapor power cycle
"""
import cantera as ct
# parameters
eta_pump = 0.6 # pump isentropic efficiency
eta_turbine = 0.8 # turbine isentropic efficiency
p_max = 8.0e5 # maximum pressure
def pump(fluid, p_final, eta):
"""Adiabatically pump a fluid to pressure p_final, using
a pump with isentropic efficiency eta."""
h0 = fluid.h
s0 = fluid.s
fluid.SP = s0, p_final
h1s = fluid.h
isentropic_work = h1s - h0
actual_work = isentropic_work / eta
h1 = h0 + actual_work
fluid.HP = h1, p_final
return actual_work
def expand(fluid, p_final, eta):
"""Adiabatically expand a fluid to pressure p_final, using
a turbine with isentropic efficiency eta."""
h0 = fluid.h
s0 = fluid.s
fluid.SP =s0, p_final
h1s = fluid.h
isentropic_work = h0 - h1s
actual_work = isentropic_work * eta
h1 = h0 - actual_work
fluid.HP = h1, p_final
return actual_work
def printState(n, fluid):
print('\n***************** State {0} ******************'.format(n))
print(fluid.report())
if __name__ == '__main__':
# create an object representing water
w = ct.Water()
# start with saturated liquid water at 300 K
w.TX = 300.0, 0.0
h1 = w.h
p1 = w.P
printState(1, w)
# pump it adiabatically to p_max
pump_work = pump(w, p_max, eta_pump)
h2 = w.h
printState(2, w)
# heat it at constant pressure until it reaches the saturated vapor state
# at this pressure
w.PX = p_max, 1.0
h3 = w.h
heat_added = h3 - h2
printState(3, w)
# expand back to p1
turbine_work = expand(w, p1, eta_turbine)
printState(4, w)
# efficiency
eff = (turbine_work - pump_work)/heat_added
print('efficiency = ', eff)
| bsd-3-clause | -2,803,135,325,289,463,300 | 22.6 | 77 | 0.602825 | false | 2.954925 | false | false | false |
paulmadore/Eric-IDE | 6-6.0.9/eric/Plugins/VcsPlugins/vcsMercurial/HgConflictsListDialog.py | 2 | 14728 | # -*- coding: utf-8 -*-
# Copyright (c) 2014 - 2015 Detlev Offenbach <[email protected]>
#
"""
Module implementing a dialog to show a list of files which had or still have
conflicts.
"""
from __future__ import unicode_literals
import os
from PyQt5.QtCore import pyqtSlot, Qt, QPoint, QProcess, QTimer
from PyQt5.QtGui import QWidget
from PyQt5.QtWidgets import QAbstractButton, QDialogButtonBox, QHeaderView, \
QTreeWidgetItem, QLineEdit, QApplication
from E5Gui import E5MessageBox
from E5Gui.E5Application import e5App
from .Ui_HgConflictsListDialog import Ui_HgConflictsListDialog
import Utilities.MimeTypes
class HgConflictsListDialog(QWidget, Ui_HgConflictsListDialog):
"""
Class implementing a dialog to show a list of files which had or still
have conflicts.
"""
StatusRole = Qt.UserRole + 1
FilenameRole = Qt.UserRole + 2
def __init__(self, vcs, parent=None):
"""
Constructor
@param vcs reference to the vcs object
@param parent parent widget (QWidget)
"""
super(HgConflictsListDialog, self).__init__(parent)
self.setupUi(self)
self.__position = QPoint()
self.buttonBox.button(QDialogButtonBox.Close).setEnabled(False)
self.buttonBox.button(QDialogButtonBox.Cancel).setDefault(True)
self.conflictsList.headerItem().setText(
self.conflictsList.columnCount(), "")
self.conflictsList.header().setSortIndicator(0, Qt.AscendingOrder)
self.refreshButton = self.buttonBox.addButton(
self.tr("&Refresh"), QDialogButtonBox.ActionRole)
self.refreshButton.setToolTip(
self.tr("Press to refresh the list of conflicts"))
self.refreshButton.setEnabled(False)
self.vcs = vcs
self.project = e5App().getObject("Project")
self.__hgClient = vcs.getClient()
if self.__hgClient:
self.process = None
else:
self.process = QProcess()
self.process.finished.connect(self.__procFinished)
self.process.readyReadStandardOutput.connect(self.__readStdout)
self.process.readyReadStandardError.connect(self.__readStderr)
def closeEvent(self, e):
"""
Protected slot implementing a close event handler.
@param e close event (QCloseEvent)
"""
if self.__hgClient:
if self.__hgClient.isExecuting():
self.__hgClient.cancel()
else:
if self.process is not None and \
self.process.state() != QProcess.NotRunning:
self.process.terminate()
QTimer.singleShot(2000, self.process.kill)
self.process.waitForFinished(3000)
self.__position = self.pos()
e.accept()
def show(self):
"""
Public slot to show the dialog.
"""
if not self.__position.isNull():
self.move(self.__position)
super(HgConflictsListDialog, self).show()
def start(self, path):
"""
Public slot to start the tags command.
@param path name of directory to list conflicts for (string)
"""
self.errorGroup.hide()
QApplication.processEvents()
self.intercept = False
dname, fname = self.vcs.splitPath(path)
# find the root of the repo
self.__repodir = dname
while not os.path.isdir(
os.path.join(self.__repodir, self.vcs.adminDir)):
self.__repodir = os.path.dirname(self.__repodir)
if os.path.splitdrive(self.__repodir)[1] == os.sep:
return
self.activateWindow()
self.raise_()
self.conflictsList.clear()
self.__started = True
self.__getEntries()
def __getEntries(self):
"""
Private method to get the conflict entries.
"""
args = self.vcs.initCommand("resolve")
args.append('--list')
if self.__hgClient:
self.inputGroup.setEnabled(False)
self.inputGroup.hide()
out, err = self.__hgClient.runcommand(args)
if err:
self.__showError(err)
if out:
for line in out.splitlines():
self.__processOutputLine(line)
if self.__hgClient.wasCanceled():
break
self.__finish()
else:
self.process.kill()
self.process.setWorkingDirectory(self.__repodir)
self.process.start('hg', args)
procStarted = self.process.waitForStarted(5000)
if not procStarted:
self.inputGroup.setEnabled(False)
self.inputGroup.hide()
E5MessageBox.critical(
self,
self.tr('Process Generation Error'),
self.tr(
'The process {0} could not be started. '
'Ensure, that it is in the search path.'
).format('hg'))
else:
self.inputGroup.setEnabled(True)
self.inputGroup.show()
def __finish(self):
"""
Private slot called when the process finished or the user pressed
the button.
"""
if self.process is not None and \
self.process.state() != QProcess.NotRunning:
self.process.terminate()
QTimer.singleShot(2000, self.process.kill)
self.process.waitForFinished(3000)
QApplication.restoreOverrideCursor()
self.buttonBox.button(QDialogButtonBox.Close).setEnabled(True)
self.buttonBox.button(QDialogButtonBox.Cancel).setEnabled(False)
self.buttonBox.button(QDialogButtonBox.Close).setDefault(True)
self.inputGroup.setEnabled(False)
self.inputGroup.hide()
self.refreshButton.setEnabled(True)
self.__resizeColumns()
self.__resort()
self.on_conflictsList_itemSelectionChanged()
@pyqtSlot(QAbstractButton)
def on_buttonBox_clicked(self, button):
"""
Private slot called by a button of the button box clicked.
@param button button that was clicked (QAbstractButton)
"""
if button == self.buttonBox.button(QDialogButtonBox.Close):
self.close()
elif button == self.buttonBox.button(QDialogButtonBox.Cancel):
if self.__hgClient:
self.__hgClient.cancel()
else:
self.__finish()
elif button == self.refreshButton:
self.on_refreshButton_clicked()
def __procFinished(self, exitCode, exitStatus):
"""
Private slot connected to the finished signal.
@param exitCode exit code of the process (integer)
@param exitStatus exit status of the process (QProcess.ExitStatus)
"""
self.__finish()
def __resort(self):
"""
Private method to resort the tree.
"""
self.conflictsList.sortItems(
self.conflictsList.sortColumn(),
self.conflictsList.header().sortIndicatorOrder())
def __resizeColumns(self):
"""
Private method to resize the list columns.
"""
self.conflictsList.header().resizeSections(
QHeaderView.ResizeToContents)
self.conflictsList.header().setStretchLastSection(True)
def __generateItem(self, status, name):
"""
Private method to generate a tag item in the tag list.
@param status status of the file (string)
@param name name of the file (string)
"""
itm = QTreeWidgetItem(self.conflictsList)
if status == "U":
itm.setText(0, self.tr("Unresolved"))
elif status == "R":
itm.setText(0, self.tr("Resolved"))
else:
itm.setText(0, self.tr("Unknown Status"))
itm.setText(1, name)
itm.setData(0, self.StatusRole, status)
itm.setData(0, self.FilenameRole, self.project.getAbsolutePath(name))
def __readStdout(self):
"""
Private slot to handle the readyReadStdout signal.
It reads the output of the process, formats it and inserts it into
the contents pane.
"""
self.process.setReadChannel(QProcess.StandardOutput)
while self.process.canReadLine():
s = str(self.process.readLine(), self.vcs.getEncoding(),
'replace').strip()
self.__processOutputLine(s)
def __processOutputLine(self, line):
"""
Private method to process the lines of output.
@param line output line to be processed (string)
"""
status, filename = line.strip().split(None, 1)
self.__generateItem(status, filename)
@pyqtSlot()
def on_refreshButton_clicked(self):
"""
Private slot to refresh the log.
"""
self.buttonBox.button(QDialogButtonBox.Close).setEnabled(False)
self.buttonBox.button(QDialogButtonBox.Cancel).setEnabled(True)
self.buttonBox.button(QDialogButtonBox.Cancel).setDefault(True)
self.inputGroup.setEnabled(True)
self.inputGroup.show()
self.refreshButton.setEnabled(False)
self.start(self.__repodir)
def __readStderr(self):
"""
Private slot to handle the readyReadStderr signal.
It reads the error output of the process and inserts it into the
error pane.
"""
if self.process is not None:
s = str(self.process.readAllStandardError(),
self.vcs.getEncoding(), 'replace')
self.__showError(s)
def __showError(self, out):
"""
Private slot to show some error.
@param out error to be shown (string)
"""
self.errorGroup.show()
self.errors.insertPlainText(out)
self.errors.ensureCursorVisible()
def on_passwordCheckBox_toggled(self, isOn):
"""
Private slot to handle the password checkbox toggled.
@param isOn flag indicating the status of the check box (boolean)
"""
if isOn:
self.input.setEchoMode(QLineEdit.Password)
else:
self.input.setEchoMode(QLineEdit.Normal)
@pyqtSlot()
def on_sendButton_clicked(self):
"""
Private slot to send the input to the subversion process.
"""
input = self.input.text()
input += os.linesep
if self.passwordCheckBox.isChecked():
self.errors.insertPlainText(os.linesep)
self.errors.ensureCursorVisible()
else:
self.errors.insertPlainText(input)
self.errors.ensureCursorVisible()
self.process.write(input)
self.passwordCheckBox.setChecked(False)
self.input.clear()
def on_input_returnPressed(self):
"""
Private slot to handle the press of the return key in the input field.
"""
self.intercept = True
self.on_sendButton_clicked()
def keyPressEvent(self, evt):
"""
Protected slot to handle a key press event.
@param evt the key press event (QKeyEvent)
"""
if self.intercept:
self.intercept = False
evt.accept()
return
super(HgConflictsListDialog, self).keyPressEvent(evt)
@pyqtSlot(QTreeWidgetItem, int)
def on_conflictsList_itemDoubleClicked(self, item, column):
"""
Private slot to open the double clicked entry.
@param item reference to the double clicked item (QTreeWidgetItem)
@param column column that was double clicked (integer)
"""
self.on_editButton_clicked()
@pyqtSlot()
def on_conflictsList_itemSelectionChanged(self):
"""
Private slot to handle a change of selected conflict entries.
"""
selectedCount = len(self.conflictsList.selectedItems())
unresolved = resolved = 0
for itm in self.conflictsList.selectedItems():
status = itm.data(0, self.StatusRole)
if status == "U":
unresolved += 1
elif status == "R":
resolved += 1
self.resolvedButton.setEnabled(unresolved > 0)
self.unresolvedButton.setEnabled(resolved > 0)
self.reMergeButton.setEnabled(unresolved > 0)
self.editButton.setEnabled(
selectedCount == 1 and
Utilities.MimeTypes.isTextFile(
self.conflictsList.selectedItems()[0].data(
0, self.FilenameRole)))
@pyqtSlot()
def on_resolvedButton_clicked(self):
"""
Private slot to mark the selected entries as resolved.
"""
names = [
itm.data(0, self.FilenameRole)
for itm in self.conflictsList.selectedItems()
if itm.data(0, self.StatusRole) == "U"
]
if names:
self.vcs.hgResolved(names)
self.on_refreshButton_clicked()
@pyqtSlot()
def on_unresolvedButton_clicked(self):
"""
Private slot to mark the selected entries as unresolved.
"""
names = [
itm.data(0, self.FilenameRole)
for itm in self.conflictsList.selectedItems()
if itm.data(0, self.StatusRole) == "R"
]
if names:
self.vcs.hgResolved(names, unresolve=True)
self.on_refreshButton_clicked()
@pyqtSlot()
def on_reMergeButton_clicked(self):
"""
Private slot to re-merge the selected entries.
"""
names = [
itm.data(0, self.FilenameRole)
for itm in self.conflictsList.selectedItems()
if itm.data(0, self.StatusRole) == "U"
]
if names:
self.vcs.hgReMerge(names)
@pyqtSlot()
def on_editButton_clicked(self):
"""
Private slot to open the selected file in an editor.
"""
itm = self.conflictsList.selectedItems()[0]
filename = itm.data(0, self.FilenameRole)
if Utilities.MimeTypes.isTextFile(filename):
e5App().getObject("ViewManager").getEditor(filename)
| gpl-3.0 | 238,113,331,321,496,500 | 32.022422 | 78 | 0.57299 | false | 4.412223 | false | false | false |
MaxNoe/cta_event_viewer | protobuf/cta_event_pb2.py | 1 | 2433 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: cta_event.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='cta_event.proto',
package='cta_event',
serialized_pb=_b('\n\x0f\x63ta_event.proto\x12\tcta_event\"2\n\x08\x43TAEvent\x12\x14\n\x0ctelescope_id\x18\x01 \x02(\r\x12\x10\n\x04\x64\x61ta\x18\x04 \x03(\x02\x42\x02\x10\x01')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_CTAEVENT = _descriptor.Descriptor(
name='CTAEvent',
full_name='cta_event.CTAEvent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='telescope_id', full_name='cta_event.CTAEvent.telescope_id', index=0,
number=1, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='data', full_name='cta_event.CTAEvent.data', index=1,
number=4, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=30,
serialized_end=80,
)
DESCRIPTOR.message_types_by_name['CTAEvent'] = _CTAEVENT
CTAEvent = _reflection.GeneratedProtocolMessageType('CTAEvent', (_message.Message,), dict(
DESCRIPTOR = _CTAEVENT,
__module__ = 'cta_event_pb2'
# @@protoc_insertion_point(class_scope:cta_event.CTAEvent)
))
_sym_db.RegisterMessage(CTAEvent)
_CTAEVENT.fields_by_name['data'].has_options = True
_CTAEVENT.fields_by_name['data']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
# @@protoc_insertion_point(module_scope)
| mit | -9,132,853,243,116,970,000 | 31.013158 | 181 | 0.7127 | false | 3.056533 | false | true | false |
philiparvidsson/pymake2 | src/pymake2/cli/main.py | 1 | 2868 | """
Provides the command-line interface for pymake2.
"""
#---------------------------------------
# IMPORTS
#---------------------------------------
import os
import sys
from pymake2 import report
from pymake2.cli import info, options
from pymake2.core import makeconf
from pymake2.core.exceptions import NoSuchTargetError
from pymake2.core.maker import Maker
from pymake2.core.target import Target
from pymake2.utils import color
#---------------------------------------
# CONSTANTS
#---------------------------------------
# Exit code when a fatal error has been encountered.
EXIT_FATAL = -1
# Exit code all went well.
EXIT_MAKE_OK = 0
# Exit code when there was nothing to do.
EXIT_NO_MAKE = 1
#---------------------------------------
# GLOBALS
#---------------------------------------
# Pymake exit code.
exit_code = EXIT_MAKE_OK
#---------------------------------------
# FUNCTIONS
#---------------------------------------
def fatal(s, *args):
s = "fatal: " + s.format(*args)
if not options.disable_color:
s = color.red(s)
println(s)
sys.exit(EXIT_FATAL)
def println(s=None, *args):
if s:
s = s.format(*args)
print s
else:
print
def pymake2(conf=None, args=None):
args = sys.argv if args is None else [sys.argv[0]] + args
# Keep arguments beginning with two hyphens.
opts = [arg for arg in args if arg.startswith('--')]
# Keep arguments *not* beginning with two hyphens.
args = [arg for arg in args if arg not in opts]
# Parse command line options.
options.parse(opts)
if conf and isinstance(conf, dict):
conf = makeconf.from_dict(conf)
conf = conf or options.conf or makeconf.from_dict({})
if options.conf:
conf = makeconf.merge(conf, options.conf)
Maker.inst().check_targets()
report_problems()
targets = args[1:]
if not targets:
targets = [ None ]
for name in targets:
if not name and not Maker.inst().def_target:
println("\nNo target specified and there is no default target.")
info.print_targets()
sys.exit(EXIT_NO_MAKE)
try:
Maker.inst().make(name, conf)
except NoSuchTargetError as e:
fatal("no such target: '{}'", e.target_name)
#sys.exit(exit_code)
def report_problems():
any_errors = False
# Report all problems
for problem in report.problems():
if problem.is_error:
any_errors = True
s = problem.text
if not options.disable_color:
if problem.is_error: s = color.red (s)
else : s = color.yellow(s)
if problem.is_error or not options.disable_warnings:
println(s)
if any_errors:
fatal("there were errors; aborting.")
| mit | 1,219,372,225,046,710,000 | 23.305085 | 76 | 0.541144 | false | 3.875676 | false | false | false |
IntelLabs/hpat | sdc/tests/tests_perf/generator.py | 1 | 4291 | import time
import numpy as np
from typing import NamedTuple
from sdc.io.csv_ext import to_varname
from sdc.tests.test_utils import *
class CallExpression(NamedTuple):
"""
code: function or method call as a string
type_: type of function performed (Python, Numba, SDC)
jitted: option indicating whether to jit call
"""
code: str
type_: str
jitted: bool
class TestCase(NamedTuple):
"""
name: name of the API item, e.g. method, operator
size: size of the generated data for tests
params: method parameters in format 'par1, par2, ...'
call_expr: call expression as a string, e.g. '(A+B).sum()' where A, B are Series or DF
usecase_params: input parameters for usecase in format 'par1, par2, ...', e.g. 'data, other'
data_num: total number of generated data, e.g. 2 (data, other)
input_data: input data for generating test data
skip: flag for skipping a test
"""
name: str
size: list
params: str = ''
call_expr: str = None
usecase_params: str = None
data_num: int = 1
input_data: list = None
skip: bool = False
def to_varname_without_excess_underscores(string):
"""Removing excess underscores from the string."""
return '_'.join(i for i in to_varname(string).split('_') if i)
def generate_test_cases(cases, class_add, typ, prefix=''):
for test_case in cases:
test_name_parts = ['test', typ, prefix, test_case.name, gen_params_wo_data(test_case)]
test_name = to_varname_without_excess_underscores('_'.join(test_name_parts))
setattr(class_add, test_name, gen_test(test_case, prefix))
def gen_params_wo_data(test_case):
"""Generate API item parameters without parameters with data, e.g. without parameter other"""
extra_data_num = test_case.data_num - 1
method_params = test_case.params.split(', ')[extra_data_num:]
return ', '.join(method_params)
def gen_usecase_params(test_case):
"""Generate usecase parameters based on method parameters and number of extra generated data"""
extra_data_num = test_case.data_num - 1
extra_usecase_params = test_case.params.split(', ')[:extra_data_num]
usecase_params_parts = ['data'] + extra_usecase_params
return ', '.join(usecase_params_parts)
def gen_call_expr(test_case, prefix):
"""Generate call expression based on method name and parameters and method prefix, e.g. str"""
prefix_as_list = [prefix] if prefix else []
call_expr_parts = ['data'] + prefix_as_list + ['{}({})'.format(test_case.name, test_case.params)]
return '.'.join(call_expr_parts)
def gen_test(test_case, prefix):
func_name = 'func'
usecase = gen_usecase(test_case, prefix)
skip = '@skip_numba_jit\n' if test_case.skip else ''
test_name = test_case.name
if test_case.params:
test_name = f'{test_name}({test_case.params})'
func_text = f"""
{skip}def {func_name}(self):
self._test_case(usecase, name='{test_name}', total_data_length={test_case.size},
data_num={test_case.data_num}, input_data={test_case.input_data})
"""
loc_vars = {}
global_vars = {'usecase': usecase,
'skip_numba_jit': skip_numba_jit}
exec(func_text, global_vars, loc_vars)
func = loc_vars[func_name]
return func
def create_func(usecase_params, call_expr):
func_name = 'func'
func_text = f"""
def {func_name}({usecase_params}):
start_time = time.time()
res = {call_expr}
finish_time = time.time()
return finish_time - start_time, res
"""
loc_vars = {}
exec(func_text, globals(), loc_vars)
func = loc_vars[func_name]
return func
def gen_usecase(test_case, prefix):
usecase_params = test_case.usecase_params
call_expr = test_case.call_expr
if call_expr is None:
if usecase_params is None:
usecase_params = gen_usecase_params(test_case)
call_expr = gen_call_expr(test_case, prefix)
if isinstance(call_expr, list):
results = []
for ce in call_expr:
results.append({
'func': create_func(usecase_params, ce.code),
'type_': ce.type_,
'jitted': ce.jitted
})
return results
func = create_func(usecase_params, call_expr)
return func
| bsd-2-clause | -4,105,631,501,158,639,000 | 29.006993 | 101 | 0.636215 | false | 3.389415 | true | false | false |
indico/indico | indico/modules/events/management/controllers/protection.py | 3 | 7204 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from operator import attrgetter
from flask import flash, jsonify, redirect, request
from werkzeug.exceptions import NotFound
from indico.core.db.sqlalchemy.protection import ProtectionMode, render_acl
from indico.core.permissions import (get_available_permissions, get_permissions_info, get_principal_permissions,
update_permissions)
from indico.modules.categories.models.roles import CategoryRole
from indico.modules.categories.util import serialize_category_role
from indico.modules.core.controllers import PrincipalsMixin
from indico.modules.events import Event
from indico.modules.events.controllers.base import RHAuthenticatedEventBase
from indico.modules.events.management.controllers.base import RHManageEventBase
from indico.modules.events.management.forms import EventProtectionForm
from indico.modules.events.management.views import WPEventProtection
from indico.modules.events.operations import update_event_protection
from indico.modules.events.roles.util import serialize_event_role
from indico.modules.events.sessions import COORDINATOR_PRIV_SETTINGS, session_settings
from indico.modules.events.sessions.operations import update_session_coordinator_privs
from indico.modules.events.util import get_object_from_args
from indico.util import json
from indico.util.i18n import _
from indico.util.marshmallow import PrincipalDict
from indico.web.args import use_rh_kwargs
from indico.web.flask.util import url_for
from indico.web.forms.base import FormDefaults
from indico.web.forms.fields.principals import PermissionsField, serialize_principal
from indico.web.rh import RH
from indico.web.util import jsonify_template
class RHShowNonInheriting(RHManageEventBase):
"""Show a list of non-inheriting child objects."""
def _process_args(self):
RHManageEventBase._process_args(self)
self.obj = get_object_from_args()[2]
if self.obj is None:
raise NotFound
def _process(self):
objects = self.obj.get_non_inheriting_objects()
return jsonify_template('events/management/non_inheriting_objects.html', objects=objects)
class RHEventACL(RHManageEventBase):
"""Display the inherited ACL of the event."""
def _process(self):
return render_acl(self.event)
class RHEventACLMessage(RHManageEventBase):
"""Render the inheriting ACL message."""
def _process(self):
mode = ProtectionMode[request.args['mode']]
return jsonify_template('forms/protection_field_acl_message.html', object=self.event, mode=mode,
endpoint='event_management.acl')
class RHEventProtection(RHManageEventBase):
"""Show event protection."""
def _process(self):
form = EventProtectionForm(obj=FormDefaults(**self._get_defaults()), event=self.event)
selectable_permissions = {k for k, v in get_available_permissions(Event).items() if v.user_selectable}
user_permissions = [(p.principal, set(p.permissions)) for p in self.event.acl_entries]
hidden_permissions = sorted((
(principal, sorted(perms))
for principal, perms in user_permissions
if perms and not (perms & selectable_permissions)
), key=lambda x: (x[0].principal_order, x[0].name.lower()))
form.permissions.hidden_permissions = [(p.name, perms) for p, perms in hidden_permissions]
if form.validate_on_submit():
update_permissions(self.event, form)
update_event_protection(self.event, {'protection_mode': form.protection_mode.data,
'own_no_access_contact': form.own_no_access_contact.data,
'access_key': form.access_key.data,
'visibility': form.visibility.data,
'public_regform_access': form.public_regform_access.data})
self._update_session_coordinator_privs(form)
flash(_('Protection settings have been updated'), 'success')
return redirect(url_for('.protection', self.event))
return WPEventProtection.render_template('event_protection.html', self.event, 'protection', form=form)
def _get_defaults(self):
registration_managers = {p.principal for p in self.event.acl_entries
if p.has_management_permission('registration', explicit=True)}
event_session_settings = session_settings.get_all(self.event)
coordinator_privs = {name: event_session_settings[val] for name, val in COORDINATOR_PRIV_SETTINGS.items()
if event_session_settings.get(val)}
permissions = [[serialize_principal(p.principal), list(get_principal_permissions(p, Event))]
for p in self.event.acl_entries]
permissions = [item for item in permissions if item[1]]
return dict({'protection_mode': self.event.protection_mode, 'registration_managers': registration_managers,
'access_key': self.event.access_key, 'visibility': self.event.visibility,
'own_no_access_contact': self.event.own_no_access_contact,
'public_regform_access': self.event.public_regform_access,
'permissions': permissions},
**coordinator_privs)
def _update_session_coordinator_privs(self, form):
data = {field: getattr(form, field).data for field in form.priv_fields}
update_session_coordinator_privs(self.event, data)
class RHPermissionsDialog(RH):
def _process(self):
principal = json.loads(request.form['principal'])
permissions_tree = get_permissions_info(PermissionsField.type_mapping[request.view_args['type']])[1]
return jsonify_template('events/management/permissions_dialog.html', permissions_tree=permissions_tree,
permissions=request.form.getlist('permissions'), principal=principal)
class RHEventPrincipals(PrincipalsMixin, RHAuthenticatedEventBase):
@use_rh_kwargs({
'values': PrincipalDict(allow_groups=True, allow_external_users=True, allow_event_roles=True,
allow_category_roles=True, allow_registration_forms=True, allow_emails=True,
missing={})
}, rh_context=('event',))
def _process(self, values):
self.values = values
return PrincipalsMixin._process(self)
class RHEventRolesJSON(RHAuthenticatedEventBase):
def _process(self):
event_roles = sorted(self.event.roles, key=attrgetter('code'))
return jsonify([serialize_event_role(er, legacy=False) for er in event_roles])
class RHCategoryRolesJSON(RHAuthenticatedEventBase):
def _process(self):
category_roles = CategoryRole.get_category_roles(self.event.category)
return jsonify([serialize_category_role(cr, legacy=False) for cr in category_roles])
| mit | 4,333,181,202,084,517,000 | 48.682759 | 115 | 0.686008 | false | 4.109527 | false | false | false |
jdwittenauer/ionyx | ionyx/contrib/category_to_numeric.py | 1 | 2642 | import numpy as np
class CategoryToNumeric(object):
"""
Transform class that replaces a categorical value with a representative target value
for instances that belong to that category. This technique is useful as a method to
turn categorical features into numeric values for use in an estimator, and can be
viewed as an alternative approach to one-hot encoding. Only suitable for regression
tasks.
Parameters
----------
categorical_features : list
A list of integers representing the column indices to apply the transform to.
metric : {'mean', 'median', 'std'}, optional, default 'mean'
The method used to calculate the replacement value for a category.
Attributes
----------
feature_map_ : dict
Mapping of categorical to target values.
"""
def __init__(self, categorical_features, metric='mean'):
self.categorical_features = categorical_features
self.metric = metric
self.feature_map_ = {}
def fit(self, X, y):
"""
Fit the transform using X as the training data and y as the label.
Parameters
----------
X : array-like
Training input samples.
y : array-like
Target values.
"""
for i in self.categorical_features:
self.feature_map_[i] = {}
distinct = list(np.unique(X[:, i]))
for j in distinct:
if self.metric == 'mean':
self.feature_map_[i][j] = y[X[:, i] == j].mean()
elif self.metric == 'median':
self.feature_map_[i][j] = y[X[:, i] == j].median()
elif self.metric == 'std':
self.feature_map_[i][j] = y[X[:, i] == j].std()
else:
raise Exception('Metric not recognized.')
def transform(self, X):
"""
Apply the transform to the data.
Parameters
----------
X : array-like
Training input samples.
"""
X_trans = np.copy(X)
for i in self.categorical_features:
distinct = list(np.unique(X_trans[:, i]))
for j in distinct:
X_trans[X_trans[:, i] == j, i] = self.feature_map_[i][j]
return X_trans
def fit_transform(self, X, y):
"""
Wrapper method that calls fit and transform sequentially.
Parameters
----------
X : array-like
Training input samples.
y : array-like
Target values.
"""
self.fit(X, y)
return self.transform(X)
| apache-2.0 | 3,332,585,937,923,979,300 | 30.082353 | 88 | 0.539364 | false | 4.531732 | false | false | false |
DemocracyClub/yournextrepresentative | ynr/apps/people/migrations/0004_move_person_data.py | 1 | 1879 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-10-23 18:45
from __future__ import unicode_literals
from django.db import migrations
def move_person_from_popolo(apps, schema_editor):
PopoloPerson = apps.get_model("popolo", "Person")
PeoplePerson = apps.get_model("people", "Person")
for pperson in PopoloPerson.objects.all():
people_person = PeoplePerson.objects.create(
pk=pperson.pk,
start_date=pperson.start_date,
end_date=pperson.end_date,
created_at=pperson.created_at,
updated_at=pperson.updated_at,
name=pperson.name,
family_name=pperson.family_name,
given_name=pperson.given_name,
additional_name=pperson.additional_name,
honorific_prefix=pperson.honorific_prefix,
honorific_suffix=pperson.honorific_suffix,
patronymic_name=pperson.patronymic_name,
sort_name=pperson.sort_name,
email=pperson.email,
gender=pperson.gender,
birth_date=pperson.birth_date,
death_date=pperson.death_date,
summary=pperson.summary,
biography=pperson.biography,
national_identity=pperson.national_identity,
versions=pperson.versions,
)
for election in pperson.not_standing.all():
people_person.not_standing.add(election)
class Migration(migrations.Migration):
dependencies = [("people", "0003_add_person_model")]
operations = [
migrations.RunPython(
move_person_from_popolo, migrations.RunPython.noop
),
migrations.RunSQL(
"""
SELECT setval('people_person_id_seq', COALESCE((SELECT MAX(id)+1
FROM people_person), 1));
""",
migrations.RunSQL.noop,
),
]
| agpl-3.0 | 3,368,418,970,247,607,300 | 33.163636 | 79 | 0.602448 | false | 3.750499 | false | false | false |
alberthxf/custodian | setup.py | 1 | 1560 | import os
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
with open("README.rst") as f:
long_desc = f.read()
ind = long_desc.find("\n")
long_desc = long_desc[ind + 1:]
setup(
name="custodian",
packages=find_packages(),
version="0.8.1",
install_requires=["monty>=0.5.9", "six"],
extras_require={"vasp, nwchem, qchem": ["pymatgen>=3.0.2"]},
package_data={},
author="Shyue Ping Ong, William Davidson Richards, Stephen Dacek, "
"Xiaohui Qu",
author_email="[email protected]",
maintainer="Shyue Ping Ong",
url="https://github.com/materialsproject/custodian",
license="MIT",
description="A simple JIT job management framework in Python.",
long_description=long_desc,
keywords=["jit", "just-in-time", "job", "management", "vasp"],
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Scientific/Engineering :: Chemistry",
"Topic :: Software Development :: Libraries :: Python Modules"
],
scripts=[os.path.join("scripts", f) for f in os.listdir("scripts")]
)
| mit | 3,628,376,852,117,945,000 | 35.27907 | 71 | 0.619231 | false | 3.653396 | false | false | false |
vascotenner/holoviews | holoviews/__init__.py | 1 | 3414 | from __future__ import print_function, absolute_import
import os, sys, pydoc
import numpy as np # noqa (API import)
_cwd = os.path.abspath(os.path.split(__file__)[0])
sys.path.insert(0, os.path.join(_cwd, '..', 'param'))
import param
__version__ = param.Version(release=(1,5,0), fpath=__file__,
commit="$Format:%h$", reponame='holoviews')
from .core import archive # noqa (API import)
from .core.dimension import OrderedDict, Dimension # noqa (API import)
from .core.boundingregion import BoundingBox # noqa (API import)
from .core.options import Options, Store, StoreOptions # noqa (API import)
from .core.layout import * # noqa (API import)
from .core.element import * # noqa (API import)
from .core.overlay import * # noqa (API import)
from .core.tree import * # noqa (API import)
from .core.spaces import * # noqa (API import)
from .interface import * # noqa (API import)
from .operation import ElementOperation, MapOperation, TreeOperation # noqa (API import)
from .element import * # noqa (API import)
# Surpress warnings generated by NumPy in matplotlib
# Expected to be fixed in next matplotlib release
import warnings
warnings.filterwarnings("ignore",
message="elementwise comparison failed; returning scalar instead")
try:
import IPython # noqa (API import)
from .ipython import notebook_extension
except ImportError as e:
class notebook_extension(param.ParameterizedFunction):
def __call__(self, *args, **opts):
raise Exception("IPython notebook not available")
if str(e) != 'No module named IPython':
raise e
# A single holoviews.rc file may be executed if found.
for rcfile in [os.environ.get("HOLOVIEWSRC", ''),
"~/.holoviews.rc",
"~/.config/holoviews/holoviews.rc"]:
try:
filename = os.path.expanduser(rcfile)
with open(filename) as f:
code = compile(f.read(), filename, 'exec')
try:
exec(code)
except Exception as e:
print("Warning: Could not load %r [%r]" % (filename, str(e)))
break
except IOError:
pass
def help(obj, visualization=True, ansi=True, backend='matplotlib',
recursive=False, pattern=None):
"""
Extended version of the built-in help that supports parameterized
functions and objects. A pattern (regular expression) may be used to
filter the output and if recursive is set to True, documentation for
the supplied object is shown. Note that the recursive option will
only work with an object instance and not a class.
If ansi is set to False, all ANSI color
codes are stripped out.
"""
info = Store.info(obj, ansi=ansi, backend=backend, visualization=visualization,
recursive=recursive, pattern=pattern)
msg = ( "\nTo view the visualization options applicable to this "
"object or class, use:\n\n"
" holoviews.help(obj, visualization=True)\n\n")
if info:
print((msg if visualization is False else '') + info)
else:
pydoc.help(obj)
| bsd-3-clause | 414,004,592,514,076,860 | 40.634146 | 90 | 0.603984 | false | 4.272841 | false | false | false |
arne-cl/discoursegraphs | src/discoursegraphs/readwrite/rst/rstlatex.py | 1 | 8330 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Arne Neumann <[email protected]>
"""
This module contains code to generate figures of RST trees in Latex
(using the rst.sty package).
"""
# Python 2/3 compatibility
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import codecs
import string
import re
import nltk
from discoursegraphs.readwrite.rst.rs3.rs3tree import RSTTree
MULTISAT_RELNAME = 'MONONUC-MULTISAT'
RSTSEGMENT_TEMPLATE = string.Template("""\\rstsegment{$segment}""") # \rstsegment{Foo}
NUC_TEMPLATE = string.Template("""{}{$nucleus}""")
SAT_TEMPLATE = string.Template("""{$relation}{$satellite}""")
MULTINUC_TEMPLATE = string.Template("""\multirel{$relation}$nucleus_segments""")
RSTLATEX_TREE_RE = re.compile("\\\(dirrel|multirel)")
class RSTLatexFileWriter(object):
def __init__(self, tree, output_filepath=None):
self.tree = tree
self.rstlatextree = rsttree2rstlatex(tree)
if output_filepath is not None:
with codecs.open(output_filepath, 'w', 'utf-8') as outfile:
outfile.write(self.rstlatextree + '\n')
def __str__(self):
return self.rstlatextree
def is_nltktreelike(obj):
"""Returns true, iff the given object behaves like an nltk.Tree.
This is a "duck-typing" workaround as most RST tree classes do not
inherit from nltk.Tree but rather embed it.
"""
return hasattr(obj, 'label') and callable(obj.label)
def get_node_type(tree):
"""Returns the type of the root node of the given RST tree
(one of 'N', 'S', 'relation' or 'edu'.)
"""
if is_nltktreelike(tree):
if tree.label() in ('N', 'S'):
return tree.label()
else:
return 'relation'
elif isinstance(tree, basestring):
return 'edu'
else:
raise ValueError("Unknown tree/node type: {}".format(type(tree)))
def is_edu_segment(rstlatex_string):
"""Returns true, iff the given string does not contain an RST subtree."""
return RSTLATEX_TREE_RE.search(rstlatex_string) is None
def wrap_edu_segment(edu_segment):
"""Wraps the string content of an EDU in RST Latex markup."""
return RSTSEGMENT_TEMPLATE.substitute(segment=edu_segment)
def make_nucsat(relname, nuc_types, elements):
"""Creates a rst.sty Latex string representation of a standard RST relation
(one nucleus, one satellite).
"""
assert len(elements) == 2 and len(nuc_types) == 2, \
"A nucsat relation must have two elements."
assert set(nuc_types) == set(['N', 'S']), \
"A nucsat relation must consist of one nucleus and one satellite."
result = "\dirrel"
for i, nuc_type in enumerate(nuc_types):
element = elements[i]
if is_edu_segment(element):
element = wrap_edu_segment(element)
if nuc_type == 'N':
result += '\n\t' + NUC_TEMPLATE.substitute(nucleus=element)
else:
result += '\n\t' + SAT_TEMPLATE.substitute(satellite=element, relation=relname)
return result
def make_multinuc(relname, nucleii):
"""Creates a rst.sty Latex string representation of a multi-nuclear RST relation."""
nuc_strings = []
for nucleus in nucleii:
if is_edu_segment(nucleus):
nucleus = wrap_edu_segment(nucleus)
nuc_strings.append('{' + nucleus + '}')
nucleii_string = "\n\t" + "\n\t".join(nuc_strings)
return MULTINUC_TEMPLATE.substitute(relation=relname, nucleus_segments=nucleii_string)
def make_multisat(nucsat_tuples):
"""Creates a rst.sty Latex string representation of a multi-satellite RST subtree
(i.e. merge a set of nucleus-satellite relations that share the same nucleus
into one subtree).
"""
nucsat_tuples = [tup for tup in nucsat_tuples] # unpack the iterable, so we can check its length
assert len(nucsat_tuples) > 1, \
"A multisat relation bundle must contain more than one relation"
result = "\dirrel\n\t"
first_relation, remaining_relations = nucsat_tuples[0], nucsat_tuples[1:]
relname, nuc_types, elements = first_relation
first_nucleus_pos = current_nucleus_pos = nuc_types.index('N')
result_segments = []
# add elements (nucleus and satellite) from first relation to resulting (sub)tree
for i, nuc_type in enumerate(nuc_types):
element = elements[i]
if is_edu_segment(element):
element = wrap_edu_segment(element)
if nuc_type == 'N':
result_segments.append(NUC_TEMPLATE.substitute(nucleus=element))
else:
result_segments.append(SAT_TEMPLATE.substitute(satellite=element, relation=relname))
# reorder elements of the remaining relation and add them to the resulting (sub)tree
for (relname, nuc_types, elements) in remaining_relations:
for i, nuc_type in enumerate(nuc_types):
if nuc_type == 'N': # all relations share the same nucleus, so we don't need to reprocess it.
continue
else:
element = elements[i]
if is_edu_segment(element):
element = wrap_edu_segment(element)
result_segment = SAT_TEMPLATE.substitute(satellite=element, relation=relname)
if i < first_nucleus_pos: # satellite comes before the nucleus
result_segments.insert(current_nucleus_pos, result_segment)
current_nucleus_pos += 1
else:
result_segments.append(result_segment)
return result + '\n\t'.join(result_segments)
def rsttree2rstlatex(tree, indent_level=0):
node_type = get_node_type(tree)
if node_type == 'relation':
relname = tree.label()
expected_types = set(['N', 'S'])
child_node_types = [get_node_type(child) for child in tree]
observed_types = set(child_node_types)
unexpected_types = observed_types.difference(expected_types)
assert unexpected_types == set(), \
"Observed types ({}) contain unexpected types ({})".format(observed_types, unexpected_types)
subtree_strings = [rsttree2rstlatex(grandchild, indent_level=indent_level+1)
for child in tree
for grandchild in child]
if observed_types == set('N'): # relation only consists of nucleii
return indent_tab(make_multinuc(relname=relname, nucleii=subtree_strings), indent_level)
elif relname == MULTISAT_RELNAME: # multiple relations sharing the same nucleus
relations = [grandchild for child in tree for grandchild in child]
relnames = [rel.label() for rel in relations]
nuctypes_per_relation = [[elem.label() for elem in relation] for relation in relations]
subtree_strings_per_relation = [[rsttree2rstlatex(elem[0]) for elem in relation] for relation in relations]
nucsat_tuples = zip(relnames, nuctypes_per_relation, subtree_strings_per_relation)
return indent_tab(make_multisat(nucsat_tuples), indent_level)
else: # a "normal" relation between one nucleus and one satellite
assert len(child_node_types) == 2, "A nuc/sat relationship must consist of two elements"
return indent_tab(make_nucsat(relname, child_node_types, subtree_strings), indent_level)
elif node_type == 'edu':
return " ".join(tree.split())
elif node_type in ('N', 'S'): # a single segment not in any relation
return indent_tab(string.Template("\rstsegment{$content}").substitute(content=tree[0]), indent_level)
else:
raise ValueError("Can't handle this node: {}".format(tree.label()))
def indent(text, amount, ch=' '):
"""Indents a string by the given amount of characters."""
padding = amount * ch
return ''.join(padding+line for line in text.splitlines(True))
def indent_tab(text, number):
"""Indents a string by the given number of tabs (one tab = 8 spaces)."""
return indent(text, number, '\t')
def write_rstlatex(tree, output_file=None):
"""Converts an RST tree into a rst.sty Latex string representation"""
return RSTLatexFileWriter(tree, output_filepath=output_file)
| bsd-3-clause | -8,106,414,627,086,261,000 | 37.211009 | 119 | 0.646218 | false | 3.650307 | false | false | false |
getsentry/zeus | tests/zeus/utils/test_metrics.py | 1 | 1845 | from time import time
from zeus.utils.metrics import Counter, HitCounter, gauge
def test_hit_counter():
current_ts = int(time())
c = HitCounter(size=3)
c.incr(current_ts=current_ts)
c.incr(current_ts=current_ts)
assert c.count(current_ts=current_ts) == 2
assert c.count(1, current_ts=current_ts) == 2
assert c.count(2, current_ts=current_ts) == 2
current_ts += 1
c.incr(current_ts=current_ts)
assert c.count(current_ts=current_ts) == 3
assert c.count(1, current_ts=current_ts) == 1
assert c.count(2, current_ts=current_ts) == 3
current_ts += 1
c.incr(current_ts=current_ts)
assert c.count(current_ts=current_ts) == 4
assert c.count(1, current_ts=current_ts) == 1
assert c.count(2, current_ts=current_ts) == 2
current_ts += 1
c.incr(current_ts=current_ts)
assert c.count(current_ts=current_ts) == 3
assert c.count(1, current_ts=current_ts) == 1
assert c.count(2, current_ts=current_ts) == 2
# dont incr here as it will force a truncate, and we just want to test
# the fact that count skips invalid buckets
current_ts += 1
assert c.count(current_ts=current_ts) == 2
assert c.count(1, current_ts=current_ts) == 0
assert c.count(2, current_ts=current_ts) == 1
current_ts += 1
assert c.count(current_ts=current_ts) == 1
assert c.count(1, current_ts=current_ts) == 0
assert c.count(2, current_ts=current_ts) == 0
current_ts += 1
assert c.count(current_ts=current_ts) == 0
assert c.count(1, current_ts=current_ts) == 0
assert c.count(2, current_ts=current_ts) == 0
def test_gauge():
counter = Counter()
with gauge(counter):
assert counter.value == 1
with gauge(counter):
assert counter.value == 2
assert counter.value == 1
assert counter.value == 0
| apache-2.0 | -3,649,925,010,883,005,400 | 30.810345 | 74 | 0.63794 | false | 3.085284 | false | false | false |
PriviPK/privipk-sync-engine | inbox/util/misc.py | 3 | 5298 | import sys
import pkgutil
import time
import re
from datetime import datetime
from email.utils import parsedate_tz, mktime_tz
from inbox.log import get_logger
from inbox.providers import providers
class ProviderSpecificException(Exception):
pass
def or_none(value, selector):
if value is None:
return None
else:
return selector(value)
def strip_plaintext_quote(text):
"""
Strip out quoted text with no inline responses.
TODO: Make sure that the line before the quote looks vaguely like
a quote header. May be hard to do in an internationalized manner?
"""
found_quote = False
lines = text.strip().splitlines()
quote_start = None
for i, line in enumerate(lines):
if line.startswith('>'):
found_quote = True
if quote_start is None:
quote_start = i
else:
found_quote = False
if found_quote:
return '\n'.join(lines[:quote_start - 1])
else:
return text
def parse_ml_headers(headers):
"""
Parse the mailing list headers described in RFC 4021,
these headers are optional (RFC 2369).
"""
attrs = {}
attrs['List-Archive'] = headers.get('List-Archive')
attrs['List-Help'] = headers.get('List-Help')
attrs['List-Id'] = headers.get('List-Id')
attrs['List-Owner'] = headers.get('List-Owner')
attrs['List-Post'] = headers.get('List-Post')
attrs['List-Subscribe'] = headers.get('List-Subscribe')
attrs['List-Unsubscribe'] = headers.get('List-Unsubscribe')
return attrs
def parse_references(references, in_reply_to):
"""
Parse a References: header and returns an array of MessageIDs.
The returned array contains the MessageID in In-Reply-To if
the header is present.
Parameters
----------
references: string
the contents of the referfences header
in_reply_to: string
the contents of the in-reply-to header
Returns
-------
list of MessageIds (strings) or an empty list.
"""
replyto = in_reply_to.split()[0] if in_reply_to else in_reply_to
if not references:
if replyto:
return [replyto]
else:
return []
references = references.split()
if replyto not in references:
references.append(replyto)
return references
def dt_to_timestamp(dt):
return int((dt - datetime(1970, 1, 1)).total_seconds())
def get_internaldate(date, received):
""" Get the date from the headers. """
if date is None:
other, date = received.split(';')
# All in UTC
parsed_date = parsedate_tz(date)
timestamp = mktime_tz(parsed_date)
dt = datetime.utcfromtimestamp(timestamp)
return dt
def timed(fn):
""" A decorator for timing methods. """
def timed_fn(self, *args, **kwargs):
start_time = time.time()
ret = fn(self, *args, **kwargs)
# TODO some modules like gmail.py don't have self.logger
try:
if self.log:
fn_logger = self.log
except AttributeError:
fn_logger = get_logger()
# out = None
fn_logger.info('[timer] {0} took {1:.3f} seconds.'.format(
str(fn), float(time.time() - start_time)))
return ret
return timed_fn
# Based on: http://stackoverflow.com/a/8556471
def load_modules(base_name, base_path):
"""
Imports all modules underneath `base_module` in the module tree.
Note that if submodules are located in different directory trees, you
need to use `pkgutil.extend_path` to make all the folders appear in
the module's `__path__`.
Returns
-------
list
All the modules in the base module tree.
"""
modules = []
for importer, module_name, _ in pkgutil.iter_modules(base_path):
full_module_name = '{}.{}'.format(base_name, module_name)
if full_module_name not in sys.modules:
module = importer.find_module(module_name).load_module(
full_module_name)
else:
module = sys.modules[full_module_name]
modules.append(module)
return modules
def register_backends(base_name, base_path):
"""
Dynamically loads all packages contained within thread
backends module, including those by other module install paths
"""
modules = load_modules(base_name, base_path)
mod_for = {}
for module in modules:
if hasattr(module, 'PROVIDER'):
provider_name = module.PROVIDER
if provider_name == 'generic':
for p_name, p in providers.iteritems():
p_type = p.get('type', None)
if p_type == 'generic' and p_name not in mod_for:
mod_for[p_name] = module
else:
mod_for[provider_name] = module
return mod_for
def cleanup_subject(subject_str):
"""Clean-up a message subject-line.
For instance, 'Re: Re: Re: Birthday party' becomes 'Birthday party'"""
if subject_str is None:
return ''
# TODO consider expanding to all
# http://en.wikipedia.org/wiki/List_of_email_subject_abbreviations
cleanup_regexp = "(?i)^((re|fw|fwd|aw|wg):\s*)+"
return re.sub(cleanup_regexp, "", subject_str)
| agpl-3.0 | 4,643,929,602,232,661,000 | 25.893401 | 74 | 0.61174 | false | 3.927354 | false | false | false |
apache/incubator-superset | superset/dashboards/commands/bulk_delete.py | 3 | 2873 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from typing import List, Optional
from flask_appbuilder.security.sqla.models import User
from flask_babel import lazy_gettext as _
from superset.commands.base import BaseCommand
from superset.commands.exceptions import DeleteFailedError
from superset.dashboards.commands.exceptions import (
DashboardBulkDeleteFailedError,
DashboardBulkDeleteFailedReportsExistError,
DashboardForbiddenError,
DashboardNotFoundError,
)
from superset.dashboards.dao import DashboardDAO
from superset.exceptions import SupersetSecurityException
from superset.models.dashboard import Dashboard
from superset.reports.dao import ReportScheduleDAO
from superset.views.base import check_ownership
logger = logging.getLogger(__name__)
class BulkDeleteDashboardCommand(BaseCommand):
def __init__(self, user: User, model_ids: List[int]):
self._actor = user
self._model_ids = model_ids
self._models: Optional[List[Dashboard]] = None
def run(self) -> None:
self.validate()
try:
DashboardDAO.bulk_delete(self._models)
return None
except DeleteFailedError as ex:
logger.exception(ex.exception)
raise DashboardBulkDeleteFailedError()
def validate(self) -> None:
# Validate/populate model exists
self._models = DashboardDAO.find_by_ids(self._model_ids)
if not self._models or len(self._models) != len(self._model_ids):
raise DashboardNotFoundError()
# Check there are no associated ReportSchedules
reports = ReportScheduleDAO.find_by_dashboard_ids(self._model_ids)
if reports:
report_names = [report.name for report in reports]
raise DashboardBulkDeleteFailedReportsExistError(
_("There are associated alerts or reports: %s" % ",".join(report_names))
)
# Check ownership
for model in self._models:
try:
check_ownership(model)
except SupersetSecurityException:
raise DashboardForbiddenError()
| apache-2.0 | 3,141,048,109,437,204,500 | 38.902778 | 88 | 0.714236 | false | 4.300898 | false | false | false |
t1m0thyj/aiyprojects-raspbian | src/aiy/_drivers/_tts.py | 1 | 2331 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper around a TTS system."""
import functools
import logging
import os
import subprocess
import tempfile
from aiy import i18n
# Path to a tmpfs directory to avoid SD card wear
TMP_DIR = '/run/user/%d' % os.getuid()
logger = logging.getLogger('tts')
def create_say(player):
"""Return a function say(words) for the given player."""
lang = i18n.get_language_code()
return functools.partial(say, player, lang=lang)
def say(player, words, lang='en-US', volume=60, pitch=130):
"""Say the given words with TTS.
Args:
player: To play the text-to-speech audio.
words: string to say aloud.
lang: language for the text-to-speech engine.
volume: volume for the text-to-speech engine.
pitch: pitch for the text-to-speech engine.
"""
try:
(fd, tts_wav) = tempfile.mkstemp(suffix='.wav', dir=TMP_DIR)
except IOError:
logger.exception('Using fallback directory for TTS output')
(fd, tts_wav) = tempfile.mkstemp(suffix='.wav')
os.close(fd)
words = '<volume level="' + str(volume) + '"><pitch level="' + str(pitch) + \
'">' + words + '</pitch></volume>'
try:
subprocess.call(['pico2wave', '--lang', lang, '-w', tts_wav, words])
player.play_wav(tts_wav)
finally:
os.unlink(tts_wav)
def _main():
import argparse
from aiy import audio
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description='Test TTS wrapper')
parser.add_argument('words', nargs='*', help='Words to say')
args = parser.parse_args()
if args.words:
words = ' '.join(args.words)
player = audio.get_player()
create_say(player)(words)
if __name__ == '__main__':
_main()
| apache-2.0 | -8,534,463,058,438,349,000 | 28.884615 | 81 | 0.659374 | false | 3.630841 | false | false | false |
Pluto-tv/chromium-crosswalk | tools/telemetry/third_party/gsutilz/gslib/third_party/storage_apitools/storage_v1_client.py | 12 | 44162 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generated client library for storage version v1."""
import os
import sys
from apitools.base.py import base_api
import gslib
from gslib.third_party.storage_apitools import storage_v1_messages as messages
class StorageV1(base_api.BaseApiClient):
"""Generated client library for service storage version v1."""
MESSAGES_MODULE = messages
_PACKAGE = u'storage'
_SCOPES = [u'https://www.googleapis.com/auth/devstorage.full_control', u'https://www.googleapis.com/auth/devstorage.read_only', u'https://www.googleapis.com/auth/devstorage.read_write']
_VERSION = u'v1'
_CLIENT_ID = 'nomatter'
_CLIENT_SECRET = 'nomatter'
_USER_AGENT = 'apitools gsutil/%s (%s)' % (gslib.VERSION, sys.platform)
if os.environ.get('CLOUDSDK_WRAPPER') == '1':
_USER_AGENT += ' Cloud SDK Command Line Tool'
if os.environ.get('CLOUDSDK_VERSION'):
_USER_AGENT += ' %s' % os.environ.get('CLOUDSDK_VERSION')
_CLIENT_CLASS_NAME = u'StorageV1'
_URL_VERSION = u'v1'
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
version=_VERSION):
"""Create a new storage handle."""
url = url or u'https://www.googleapis.com/storage/v1/'
super(StorageV1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params)
self._version = version
self.bucketAccessControls = self.BucketAccessControlsService(self)
self.buckets = self.BucketsService(self)
self.channels = self.ChannelsService(self)
self.defaultObjectAccessControls = self.DefaultObjectAccessControlsService(self)
self.objectAccessControls = self.ObjectAccessControlsService(self)
self.objects = self.ObjectsService(self)
class BucketAccessControlsService(base_api.BaseApiService):
"""Service class for the bucketAccessControls resource."""
_NAME = u'bucketAccessControls'
def __init__(self, client):
super(StorageV1.BucketAccessControlsService, self).__init__(client)
self._method_configs = {
'Delete': base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'storage.bucketAccessControls.delete',
ordered_params=[u'bucket', u'entity'],
path_params=[u'bucket', u'entity'],
query_params=[],
relative_path=u'b/{bucket}/acl/{entity}',
request_field='',
request_type_name=u'StorageBucketAccessControlsDeleteRequest',
response_type_name=u'StorageBucketAccessControlsDeleteResponse',
supports_download=False,
),
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'storage.bucketAccessControls.get',
ordered_params=[u'bucket', u'entity'],
path_params=[u'bucket', u'entity'],
query_params=[],
relative_path=u'b/{bucket}/acl/{entity}',
request_field='',
request_type_name=u'StorageBucketAccessControlsGetRequest',
response_type_name=u'BucketAccessControl',
supports_download=False,
),
'Insert': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'storage.bucketAccessControls.insert',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[],
relative_path=u'b/{bucket}/acl',
request_field='<request>',
request_type_name=u'BucketAccessControl',
response_type_name=u'BucketAccessControl',
supports_download=False,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'storage.bucketAccessControls.list',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[],
relative_path=u'b/{bucket}/acl',
request_field='',
request_type_name=u'StorageBucketAccessControlsListRequest',
response_type_name=u'BucketAccessControls',
supports_download=False,
),
'Patch': base_api.ApiMethodInfo(
http_method=u'PATCH',
method_id=u'storage.bucketAccessControls.patch',
ordered_params=[u'bucket', u'entity'],
path_params=[u'bucket', u'entity'],
query_params=[],
relative_path=u'b/{bucket}/acl/{entity}',
request_field='<request>',
request_type_name=u'BucketAccessControl',
response_type_name=u'BucketAccessControl',
supports_download=False,
),
'Update': base_api.ApiMethodInfo(
http_method=u'PUT',
method_id=u'storage.bucketAccessControls.update',
ordered_params=[u'bucket', u'entity'],
path_params=[u'bucket', u'entity'],
query_params=[],
relative_path=u'b/{bucket}/acl/{entity}',
request_field='<request>',
request_type_name=u'BucketAccessControl',
response_type_name=u'BucketAccessControl',
supports_download=False,
),
}
self._upload_configs = {
}
def Delete(self, request, global_params=None):
"""Permanently deletes the ACL entry for the specified entity on the specified bucket.
Args:
request: (StorageBucketAccessControlsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(StorageBucketAccessControlsDeleteResponse) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
def Get(self, request, global_params=None):
"""Returns the ACL entry for the specified entity on the specified bucket.
Args:
request: (StorageBucketAccessControlsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BucketAccessControl) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
def Insert(self, request, global_params=None):
"""Creates a new ACL entry on the specified bucket.
Args:
request: (BucketAccessControl) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BucketAccessControl) The response message.
"""
config = self.GetMethodConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params)
def List(self, request, global_params=None):
"""Retrieves ACL entries on the specified bucket.
Args:
request: (StorageBucketAccessControlsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BucketAccessControls) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
def Patch(self, request, global_params=None):
"""Updates an ACL entry on the specified bucket. This method supports patch semantics.
Args:
request: (BucketAccessControl) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BucketAccessControl) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
def Update(self, request, global_params=None):
"""Updates an ACL entry on the specified bucket.
Args:
request: (BucketAccessControl) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BucketAccessControl) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
class BucketsService(base_api.BaseApiService):
"""Service class for the buckets resource."""
_NAME = u'buckets'
def __init__(self, client):
super(StorageV1.BucketsService, self).__init__(client)
self._method_configs = {
'Delete': base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'storage.buckets.delete',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch'],
relative_path=u'b/{bucket}',
request_field='',
request_type_name=u'StorageBucketsDeleteRequest',
response_type_name=u'StorageBucketsDeleteResponse',
supports_download=False,
),
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'storage.buckets.get',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'projection'],
relative_path=u'b/{bucket}',
request_field='',
request_type_name=u'StorageBucketsGetRequest',
response_type_name=u'Bucket',
supports_download=False,
),
'Insert': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'storage.buckets.insert',
ordered_params=[u'project'],
path_params=[],
query_params=[u'predefinedAcl', u'predefinedDefaultObjectAcl', u'project', u'projection'],
relative_path=u'b',
request_field=u'bucket',
request_type_name=u'StorageBucketsInsertRequest',
response_type_name=u'Bucket',
supports_download=False,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'storage.buckets.list',
ordered_params=[u'project'],
path_params=[],
query_params=[u'maxResults', u'pageToken', u'prefix', u'project', u'projection'],
relative_path=u'b',
request_field='',
request_type_name=u'StorageBucketsListRequest',
response_type_name=u'Buckets',
supports_download=False,
),
'Patch': base_api.ApiMethodInfo(
http_method=u'PATCH',
method_id=u'storage.buckets.patch',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'predefinedDefaultObjectAcl', u'projection'],
relative_path=u'b/{bucket}',
request_field=u'bucketResource',
request_type_name=u'StorageBucketsPatchRequest',
response_type_name=u'Bucket',
supports_download=False,
),
'Update': base_api.ApiMethodInfo(
http_method=u'PUT',
method_id=u'storage.buckets.update',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'predefinedDefaultObjectAcl', u'projection'],
relative_path=u'b/{bucket}',
request_field=u'bucketResource',
request_type_name=u'StorageBucketsUpdateRequest',
response_type_name=u'Bucket',
supports_download=False,
),
}
self._upload_configs = {
}
def Delete(self, request, global_params=None):
"""Permanently deletes an empty bucket.
Args:
request: (StorageBucketsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(StorageBucketsDeleteResponse) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
def Get(self, request, global_params=None):
"""Returns metadata for the specified bucket.
Args:
request: (StorageBucketsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Bucket) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
def Insert(self, request, global_params=None):
"""Creates a new bucket.
Args:
request: (StorageBucketsInsertRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Bucket) The response message.
"""
config = self.GetMethodConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params)
def List(self, request, global_params=None):
"""Retrieves a list of buckets for a given project.
Args:
request: (StorageBucketsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Buckets) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
def Patch(self, request, global_params=None):
"""Updates a bucket. This method supports patch semantics.
Args:
request: (StorageBucketsPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Bucket) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
def Update(self, request, global_params=None):
"""Updates a bucket.
Args:
request: (StorageBucketsUpdateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Bucket) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
class ChannelsService(base_api.BaseApiService):
"""Service class for the channels resource."""
_NAME = u'channels'
def __init__(self, client):
super(StorageV1.ChannelsService, self).__init__(client)
self._method_configs = {
'Stop': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'storage.channels.stop',
ordered_params=[],
path_params=[],
query_params=[],
relative_path=u'channels/stop',
request_field='<request>',
request_type_name=u'Channel',
response_type_name=u'StorageChannelsStopResponse',
supports_download=False,
),
}
self._upload_configs = {
}
def Stop(self, request, global_params=None):
"""Stop watching resources through this channel.
Args:
request: (Channel) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(StorageChannelsStopResponse) The response message.
"""
config = self.GetMethodConfig('Stop')
return self._RunMethod(
config, request, global_params=global_params)
class DefaultObjectAccessControlsService(base_api.BaseApiService):
"""Service class for the defaultObjectAccessControls resource."""
_NAME = u'defaultObjectAccessControls'
def __init__(self, client):
super(StorageV1.DefaultObjectAccessControlsService, self).__init__(client)
self._method_configs = {
'Delete': base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'storage.defaultObjectAccessControls.delete',
ordered_params=[u'bucket', u'entity'],
path_params=[u'bucket', u'entity'],
query_params=[],
relative_path=u'b/{bucket}/defaultObjectAcl/{entity}',
request_field='',
request_type_name=u'StorageDefaultObjectAccessControlsDeleteRequest',
response_type_name=u'StorageDefaultObjectAccessControlsDeleteResponse',
supports_download=False,
),
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'storage.defaultObjectAccessControls.get',
ordered_params=[u'bucket', u'entity'],
path_params=[u'bucket', u'entity'],
query_params=[],
relative_path=u'b/{bucket}/defaultObjectAcl/{entity}',
request_field='',
request_type_name=u'StorageDefaultObjectAccessControlsGetRequest',
response_type_name=u'ObjectAccessControl',
supports_download=False,
),
'Insert': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'storage.defaultObjectAccessControls.insert',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[],
relative_path=u'b/{bucket}/defaultObjectAcl',
request_field='<request>',
request_type_name=u'ObjectAccessControl',
response_type_name=u'ObjectAccessControl',
supports_download=False,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'storage.defaultObjectAccessControls.list',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch'],
relative_path=u'b/{bucket}/defaultObjectAcl',
request_field='',
request_type_name=u'StorageDefaultObjectAccessControlsListRequest',
response_type_name=u'ObjectAccessControls',
supports_download=False,
),
'Patch': base_api.ApiMethodInfo(
http_method=u'PATCH',
method_id=u'storage.defaultObjectAccessControls.patch',
ordered_params=[u'bucket', u'entity'],
path_params=[u'bucket', u'entity'],
query_params=[],
relative_path=u'b/{bucket}/defaultObjectAcl/{entity}',
request_field='<request>',
request_type_name=u'ObjectAccessControl',
response_type_name=u'ObjectAccessControl',
supports_download=False,
),
'Update': base_api.ApiMethodInfo(
http_method=u'PUT',
method_id=u'storage.defaultObjectAccessControls.update',
ordered_params=[u'bucket', u'entity'],
path_params=[u'bucket', u'entity'],
query_params=[],
relative_path=u'b/{bucket}/defaultObjectAcl/{entity}',
request_field='<request>',
request_type_name=u'ObjectAccessControl',
response_type_name=u'ObjectAccessControl',
supports_download=False,
),
}
self._upload_configs = {
}
def Delete(self, request, global_params=None):
"""Permanently deletes the default object ACL entry for the specified entity on the specified bucket.
Args:
request: (StorageDefaultObjectAccessControlsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(StorageDefaultObjectAccessControlsDeleteResponse) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
def Get(self, request, global_params=None):
"""Returns the default object ACL entry for the specified entity on the specified bucket.
Args:
request: (StorageDefaultObjectAccessControlsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ObjectAccessControl) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
def Insert(self, request, global_params=None):
"""Creates a new default object ACL entry on the specified bucket.
Args:
request: (ObjectAccessControl) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ObjectAccessControl) The response message.
"""
config = self.GetMethodConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params)
def List(self, request, global_params=None):
"""Retrieves default object ACL entries on the specified bucket.
Args:
request: (StorageDefaultObjectAccessControlsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ObjectAccessControls) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
def Patch(self, request, global_params=None):
"""Updates a default object ACL entry on the specified bucket. This method supports patch semantics.
Args:
request: (ObjectAccessControl) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ObjectAccessControl) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
def Update(self, request, global_params=None):
"""Updates a default object ACL entry on the specified bucket.
Args:
request: (ObjectAccessControl) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ObjectAccessControl) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
class ObjectAccessControlsService(base_api.BaseApiService):
"""Service class for the objectAccessControls resource."""
_NAME = u'objectAccessControls'
def __init__(self, client):
super(StorageV1.ObjectAccessControlsService, self).__init__(client)
self._method_configs = {
'Delete': base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'storage.objectAccessControls.delete',
ordered_params=[u'bucket', u'object', u'entity'],
path_params=[u'bucket', u'entity', u'object'],
query_params=[u'generation'],
relative_path=u'b/{bucket}/o/{object}/acl/{entity}',
request_field='',
request_type_name=u'StorageObjectAccessControlsDeleteRequest',
response_type_name=u'StorageObjectAccessControlsDeleteResponse',
supports_download=False,
),
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'storage.objectAccessControls.get',
ordered_params=[u'bucket', u'object', u'entity'],
path_params=[u'bucket', u'entity', u'object'],
query_params=[u'generation'],
relative_path=u'b/{bucket}/o/{object}/acl/{entity}',
request_field='',
request_type_name=u'StorageObjectAccessControlsGetRequest',
response_type_name=u'ObjectAccessControl',
supports_download=False,
),
'Insert': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'storage.objectAccessControls.insert',
ordered_params=[u'bucket', u'object'],
path_params=[u'bucket', u'object'],
query_params=[u'generation'],
relative_path=u'b/{bucket}/o/{object}/acl',
request_field=u'objectAccessControl',
request_type_name=u'StorageObjectAccessControlsInsertRequest',
response_type_name=u'ObjectAccessControl',
supports_download=False,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'storage.objectAccessControls.list',
ordered_params=[u'bucket', u'object'],
path_params=[u'bucket', u'object'],
query_params=[u'generation'],
relative_path=u'b/{bucket}/o/{object}/acl',
request_field='',
request_type_name=u'StorageObjectAccessControlsListRequest',
response_type_name=u'ObjectAccessControls',
supports_download=False,
),
'Patch': base_api.ApiMethodInfo(
http_method=u'PATCH',
method_id=u'storage.objectAccessControls.patch',
ordered_params=[u'bucket', u'object', u'entity'],
path_params=[u'bucket', u'entity', u'object'],
query_params=[u'generation'],
relative_path=u'b/{bucket}/o/{object}/acl/{entity}',
request_field=u'objectAccessControl',
request_type_name=u'StorageObjectAccessControlsPatchRequest',
response_type_name=u'ObjectAccessControl',
supports_download=False,
),
'Update': base_api.ApiMethodInfo(
http_method=u'PUT',
method_id=u'storage.objectAccessControls.update',
ordered_params=[u'bucket', u'object', u'entity'],
path_params=[u'bucket', u'entity', u'object'],
query_params=[u'generation'],
relative_path=u'b/{bucket}/o/{object}/acl/{entity}',
request_field=u'objectAccessControl',
request_type_name=u'StorageObjectAccessControlsUpdateRequest',
response_type_name=u'ObjectAccessControl',
supports_download=False,
),
}
self._upload_configs = {
}
def Delete(self, request, global_params=None):
"""Permanently deletes the ACL entry for the specified entity on the specified object.
Args:
request: (StorageObjectAccessControlsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(StorageObjectAccessControlsDeleteResponse) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
def Get(self, request, global_params=None):
"""Returns the ACL entry for the specified entity on the specified object.
Args:
request: (StorageObjectAccessControlsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ObjectAccessControl) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
def Insert(self, request, global_params=None):
"""Creates a new ACL entry on the specified object.
Args:
request: (StorageObjectAccessControlsInsertRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ObjectAccessControl) The response message.
"""
config = self.GetMethodConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params)
def List(self, request, global_params=None):
"""Retrieves ACL entries on the specified object.
Args:
request: (StorageObjectAccessControlsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ObjectAccessControls) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
def Patch(self, request, global_params=None):
"""Updates an ACL entry on the specified object. This method supports patch semantics.
Args:
request: (StorageObjectAccessControlsPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ObjectAccessControl) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
def Update(self, request, global_params=None):
"""Updates an ACL entry on the specified object.
Args:
request: (StorageObjectAccessControlsUpdateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ObjectAccessControl) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
class ObjectsService(base_api.BaseApiService):
"""Service class for the objects resource."""
_NAME = u'objects'
def __init__(self, client):
super(StorageV1.ObjectsService, self).__init__(client)
self._method_configs = {
'Compose': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'storage.objects.compose',
ordered_params=[u'destinationBucket', u'destinationObject'],
path_params=[u'destinationBucket', u'destinationObject'],
query_params=[u'destinationPredefinedAcl', u'ifGenerationMatch', u'ifMetagenerationMatch'],
relative_path=u'b/{destinationBucket}/o/{destinationObject}/compose',
request_field=u'composeRequest',
request_type_name=u'StorageObjectsComposeRequest',
response_type_name=u'Object',
supports_download=True,
),
'Copy': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'storage.objects.copy',
ordered_params=[u'sourceBucket', u'sourceObject', u'destinationBucket', u'destinationObject'],
path_params=[u'destinationBucket', u'destinationObject', u'sourceBucket', u'sourceObject'],
query_params=[u'destinationPredefinedAcl', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'ifSourceGenerationMatch', u'ifSourceGenerationNotMatch', u'ifSourceMetagenerationMatch', u'ifSourceMetagenerationNotMatch', u'projection', u'sourceGeneration'],
relative_path=u'b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}',
request_field=u'object',
request_type_name=u'StorageObjectsCopyRequest',
response_type_name=u'Object',
supports_download=True,
),
'Delete': base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'storage.objects.delete',
ordered_params=[u'bucket', u'object'],
path_params=[u'bucket', u'object'],
query_params=[u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch'],
relative_path=u'b/{bucket}/o/{object}',
request_field='',
request_type_name=u'StorageObjectsDeleteRequest',
response_type_name=u'StorageObjectsDeleteResponse',
supports_download=False,
),
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'storage.objects.get',
ordered_params=[u'bucket', u'object'],
path_params=[u'bucket', u'object'],
query_params=[u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'projection'],
relative_path=u'b/{bucket}/o/{object}',
request_field='',
request_type_name=u'StorageObjectsGetRequest',
response_type_name=u'Object',
supports_download=True,
),
'Insert': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'storage.objects.insert',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[u'contentEncoding', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'name', u'predefinedAcl', u'projection'],
relative_path=u'b/{bucket}/o',
request_field=u'object',
request_type_name=u'StorageObjectsInsertRequest',
response_type_name=u'Object',
supports_download=True,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'storage.objects.list',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[u'delimiter', u'maxResults', u'pageToken', u'prefix', u'projection', u'versions'],
relative_path=u'b/{bucket}/o',
request_field='',
request_type_name=u'StorageObjectsListRequest',
response_type_name=u'Objects',
supports_download=False,
),
'Patch': base_api.ApiMethodInfo(
http_method=u'PATCH',
method_id=u'storage.objects.patch',
ordered_params=[u'bucket', u'object'],
path_params=[u'bucket', u'object'],
query_params=[u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'projection'],
relative_path=u'b/{bucket}/o/{object}',
request_field=u'objectResource',
request_type_name=u'StorageObjectsPatchRequest',
response_type_name=u'Object',
supports_download=False,
),
'Rewrite': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'storage.objects.rewrite',
ordered_params=[u'sourceBucket', u'sourceObject', u'destinationBucket', u'destinationObject'],
path_params=[u'destinationBucket', u'destinationObject', u'sourceBucket', u'sourceObject'],
query_params=[u'destinationPredefinedAcl', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'ifSourceGenerationMatch', u'ifSourceGenerationNotMatch', u'ifSourceMetagenerationMatch', u'ifSourceMetagenerationNotMatch', u'maxBytesRewrittenPerCall', u'projection', u'rewriteToken', u'sourceGeneration'],
relative_path=u'b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}',
request_field=u'object',
request_type_name=u'StorageObjectsRewriteRequest',
response_type_name=u'RewriteResponse',
supports_download=False,
),
'Update': base_api.ApiMethodInfo(
http_method=u'PUT',
method_id=u'storage.objects.update',
ordered_params=[u'bucket', u'object'],
path_params=[u'bucket', u'object'],
query_params=[u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'projection'],
relative_path=u'b/{bucket}/o/{object}',
request_field=u'objectResource',
request_type_name=u'StorageObjectsUpdateRequest',
response_type_name=u'Object',
supports_download=True,
),
'WatchAll': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'storage.objects.watchAll',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[u'delimiter', u'maxResults', u'pageToken', u'prefix', u'projection', u'versions'],
relative_path=u'b/{bucket}/o/watch',
request_field=u'channel',
request_type_name=u'StorageObjectsWatchAllRequest',
response_type_name=u'Channel',
supports_download=False,
),
}
self._upload_configs = {
'Insert': base_api.ApiUploadInfo(
accept=['*/*'],
max_size=None,
resumable_multipart=True,
resumable_path=u'/resumable/upload/storage/' + self._client._version + '/b/{bucket}/o',
simple_multipart=True,
simple_path=u'/upload/storage/' + self._client._version + '/b/{bucket}/o',
),
}
def Compose(self, request, global_params=None, download=None):
"""Concatenates a list of existing objects into a new object in the same bucket.
Args:
request: (StorageObjectsComposeRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
download: (Download, default: None) If present, download
data from the request via this stream.
Returns:
(Object) The response message.
"""
config = self.GetMethodConfig('Compose')
return self._RunMethod(
config, request, global_params=global_params,
download=download)
def Copy(self, request, global_params=None, download=None):
"""Copies an object to a specified location. Optionally overrides metadata.
Args:
request: (StorageObjectsCopyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
download: (Download, default: None) If present, download
data from the request via this stream.
Returns:
(Object) The response message.
"""
config = self.GetMethodConfig('Copy')
return self._RunMethod(
config, request, global_params=global_params,
download=download)
def Delete(self, request, global_params=None):
"""Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.
Args:
request: (StorageObjectsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(StorageObjectsDeleteResponse) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
def Get(self, request, global_params=None, download=None):
"""Retrieves an object or its metadata.
Args:
request: (StorageObjectsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
download: (Download, default: None) If present, download
data from the request via this stream.
Returns:
(Object) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params,
download=download)
def Insert(self, request, global_params=None, upload=None, download=None):
"""Stores a new object and metadata.
Args:
request: (StorageObjectsInsertRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
upload: (Upload, default: None) If present, upload
this stream with the request.
download: (Download, default: None) If present, download
data from the request via this stream.
Returns:
(Object) The response message.
"""
config = self.GetMethodConfig('Insert')
upload_config = self.GetUploadConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params,
upload=upload, upload_config=upload_config,
download=download)
def List(self, request, global_params=None):
"""Retrieves a list of objects matching the criteria.
Args:
request: (StorageObjectsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Objects) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
def Patch(self, request, global_params=None):
"""Updates an object's metadata. This method supports patch semantics.
Args:
request: (StorageObjectsPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Object) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
def Rewrite(self, request, global_params=None):
"""Rewrites a source object to a destination object. Optionally overrides metadata.
Args:
request: (StorageObjectsRewriteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(RewriteResponse) The response message.
"""
config = self.GetMethodConfig('Rewrite')
return self._RunMethod(
config, request, global_params=global_params)
def Update(self, request, global_params=None, download=None):
"""Updates an object's metadata.
Args:
request: (StorageObjectsUpdateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
download: (Download, default: None) If present, download
data from the request via this stream.
Returns:
(Object) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params,
download=download)
def WatchAll(self, request, global_params=None):
"""Watch for changes on all objects in a bucket.
Args:
request: (StorageObjectsWatchAllRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Channel) The response message.
"""
config = self.GetMethodConfig('WatchAll')
return self._RunMethod(
config, request, global_params=global_params)
| bsd-3-clause | 2,455,258,158,905,939,000 | 41.341323 | 367 | 0.621349 | false | 4.343661 | true | false | false |
biota/sourcetracker2 | sourcetracker/tests/test_sourcetracker.py | 1 | 58590 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Biota Technology.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import division
from unittest import TestCase, main
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sourcetracker._sourcetracker import (intersect_and_sort_samples,
collapse_source_data,
subsample_dataframe,
validate_gibbs_input,
validate_gibbs_parameters,
collate_gibbs_results,
get_samples,
generate_environment_assignments,
cumulative_proportions,
single_sink_feature_table,
ConditionalProbability,
gibbs_sampler, gibbs)
from sourcetracker._plot import plot_heatmap
class TestValidateGibbsInput(TestCase):
def setUp(self):
self.index = ['s%s' % i for i in range(5)]
self.columns = ['f%s' % i for i in range(4)]
def test_no_errors_(self):
# A table where nothing is wrong, no changes expected.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32), index=self.index,
columns=self.columns)
exp_sources = pd.DataFrame(data.astype(np.int32), index=self.index,
columns=self.columns)
obs = validate_gibbs_input(sources)
pd.util.testing.assert_frame_equal(obs, sources)
# Sources and sinks.
sinks = pd.DataFrame(data, index=self.index, columns=self.columns)
exp_sinks = pd.DataFrame(data.astype(np.int32), index=self.index,
columns=self.columns)
obs_sources, obs_sinks = validate_gibbs_input(sources, sinks)
pd.util.testing.assert_frame_equal(obs_sources, exp_sources)
pd.util.testing.assert_frame_equal(obs_sinks, exp_sinks)
def test_float_data(self):
# Data is float, expect rounding.
data = np.random.uniform(0, 1, size=20).reshape(5, 4)
sources = pd.DataFrame(data, index=self.index, columns=self.columns)
exp_sources = pd.DataFrame(np.zeros(20).reshape(5, 4).astype(np.int32),
index=self.index, columns=self.columns)
obs_sources = validate_gibbs_input(sources)
pd.util.testing.assert_frame_equal(obs_sources, exp_sources)
data = np.random.uniform(0, 1, size=20).reshape(5, 4) + 1.
sources = pd.DataFrame(data, index=self.index, columns=self.columns)
exp_sources = pd.DataFrame(np.ones(20).reshape(5, 4).astype(np.int32),
index=self.index, columns=self.columns)
obs_sources = validate_gibbs_input(sources)
pd.util.testing.assert_frame_equal(obs_sources, exp_sources)
# Sources and sinks.
data = np.random.uniform(0, 1, size=20).reshape(5, 4) + 5
sinks = pd.DataFrame(data,
index=self.index,
columns=self.columns)
exp_sinks = \
pd.DataFrame(5 * np.ones(20).reshape(5, 4).astype(np.int32),
index=self.index,
columns=self.columns)
obs_sources, obs_sinks = validate_gibbs_input(sources, sinks)
pd.util.testing.assert_frame_equal(obs_sources, exp_sources)
pd.util.testing.assert_frame_equal(obs_sinks, exp_sinks)
def test_negative_data(self):
# Values less than 0, expect errors.
data = np.random.uniform(0, 1, size=20).reshape(5, 4) - 1.
sources = pd.DataFrame(data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources)
data = -1 * np.random.randint(0, 20, size=20).reshape(5, 4)
sources = pd.DataFrame(data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources)
# Sources and sinks.
data = np.random.randint(0, 10, size=20).reshape(5, 4) + 1
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
sinks = pd.DataFrame(-10 * data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)
def test_nan_data(self):
# nans, expect errors.
data = np.random.uniform(0, 1, size=20).reshape(5, 4)
data[3, 2] = np.nan
sources = pd.DataFrame(data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources)
# Sources and sinks.
data = np.random.randint(0, 10, size=20).reshape(5, 4) + 1.
sources = pd.DataFrame(data,
index=self.index,
columns=self.columns)
data[1, 3] = np.nan
sinks = pd.DataFrame(data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)
def test_non_numeric_data(self):
# data contains at least some non-numeric columns, expect errors.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
sources.iloc[2, 2] = '3.a'
self.assertRaises(ValueError, validate_gibbs_input, sources)
# Sources and sinks.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
sinks = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
sinks.iloc[2, 2] = '3'
self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)
def test_columns_identical(self):
# Columns are identical, no error expected.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
data = np.random.randint(0, 10, size=200).reshape(50, 4)
sinks = pd.DataFrame(data.astype(np.int32),
index=['s%s' % i for i in range(50)],
columns=self.columns)
obs_sources, obs_sinks = validate_gibbs_input(sources, sinks)
pd.util.testing.assert_frame_equal(obs_sources, sources)
pd.util.testing.assert_frame_equal(obs_sinks, sinks)
def test_columns_non_identical(self):
# Columns are not identical, error expected.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
data = np.random.randint(0, 10, size=200).reshape(50, 4)
sinks = pd.DataFrame(data.astype(np.int32),
index=['s%s' % i for i in range(50)],
columns=['feature%s' % i for i in range(4)])
self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)
class TestValidateGibbsParams(TestCase):
def test_acceptable_inputs(self):
# All values acceptable, expect no errors.
alpha1 = .001
alpha2 = .1
beta = 10
restarts = 10
draws_per_restart = 1
burnin = 100
delay = 1
self.assertTrue(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
alpha1 = alpha2 = beta = 0
self.assertTrue(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
def test_not_acceptable_inputs(self):
# One of the float params is negative.
alpha1 = -.001
alpha2 = .1
beta = 10
restarts = 10
draws_per_restart = 1
burnin = 100
delay = 1
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
# One of the int params is 0.
alpha1 = .001
restarts = 0
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
# One of the int params is a float.
restarts = 1.34
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
# A param is a string.
restarts = '3.2232'
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
# A param is a nan.
restarts = 3
alpha1 = np.nan
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
class TestIntersectAndSortSamples(TestCase):
def test_partially_overlapping_tables(self):
# Test an example where there are unshared samples present in both
# feature and sample tables. Notice that order is different between
# the samples that are shared between both tables. The order of samples
# in the returned tables is set by the ordering done in np.intersect1d.
sdata_c1 = [3.1, 'red', 5]
sdata_c2 = [3.6, 'yellow', 7]
sdata_c3 = [3.9, 'yellow', -2]
sdata_c4 = [2.5, 'red', 5]
sdata_c5 = [6.7, 'blue', 10]
samples = ['s1', 's4', 's2', 's3', 'sX']
headers = ['pH', 'color', 'day']
stable = pd.DataFrame([sdata_c1, sdata_c4, sdata_c2, sdata_c3,
sdata_c5], index=samples, columns=headers)
fdata = np.arange(90).reshape(9, 10)
samples = ['s%i' % i for i in range(3, 12)]
columns = ['o%i' % i for i in range(1, 11)]
ftable = pd.DataFrame(fdata, index=samples, columns=columns)
exp_ftable = pd.DataFrame(fdata[[1, 0], :], index=['s4', 's3'],
columns=columns)
exp_stable = pd.DataFrame([sdata_c4, sdata_c3], index=['s4', 's3'],
columns=headers)
obs_stable, obs_ftable = intersect_and_sort_samples(stable, ftable)
pd.util.testing.assert_frame_equal(obs_stable, exp_stable)
pd.util.testing.assert_frame_equal(obs_ftable, exp_ftable)
# No shared samples, expect a ValueError.
ftable.index = ['ss%i' % i for i in range(9)]
self.assertRaises(ValueError, intersect_and_sort_samples, stable,
ftable)
# All samples shared, expect no changes.
fdata = np.arange(50).reshape(5, 10)
samples = ['s1', 's4', 's2', 's3', 'sX']
columns = ['o%i' % i for i in range(10)]
ftable = pd.DataFrame(fdata, index=samples, columns=columns)
exp_ftable = ftable.loc[stable.index, :]
exp_stable = stable
obs_stable, obs_ftable = intersect_and_sort_samples(stable, ftable)
pd.util.testing.assert_frame_equal(obs_stable, exp_stable)
pd.util.testing.assert_frame_equal(obs_ftable, exp_ftable)
class TestGetSamples(TestCase):
def tests(self):
# Make a dataframe which contains mixed data to test.
col0 = ['a', 'a', 'a', 'a', 'b']
col1 = [3, 2, 3, 1, 3]
col2 = ['red', 'red', 'blue', 255, 255]
headers = ['sample_location', 'num_reps', 'color']
samples = ['s1', 's2', 's3', 's4', 's5']
sample_metadata = \
pd.DataFrame.from_dict({k: v for k, v in zip(headers,
[col0, col1, col2])})
sample_metadata.index = samples
obs = get_samples(sample_metadata, 'sample_location', 'b')
exp = pd.Index(['s5'], dtype='object')
pd.util.testing.assert_index_equal(obs, exp)
obs = get_samples(sample_metadata, 'sample_location', 'a')
exp = pd.Index(['s1', 's2', 's3', 's4'], dtype='object')
pd.util.testing.assert_index_equal(obs, exp)
obs = get_samples(sample_metadata, 'color', 255)
exp = pd.Index(['s4', 's5'], dtype='object')
pd.util.testing.assert_index_equal(obs, exp)
obs = get_samples(sample_metadata, 'num_reps', 3)
exp = pd.Index(['s1', 's3', 's5'], dtype='object')
pd.util.testing.assert_index_equal(obs, exp)
class TestCollapseSourceData(TestCase):
def test_example1(self):
# Simple example with 'sum' as collapse mode.
samples = ['sample1', 'sample2', 'sample3', 'sample4']
category = 'pH'
values = [3.0, 0.4, 3.0, 3.0]
stable = pd.DataFrame(values, index=samples, columns=[category])
fdata = np.array([[10, 50, 10, 70],
[0, 25, 10, 5],
[0, 25, 10, 5],
[100, 0, 10, 5]])
ftable = pd.DataFrame(fdata, index=stable.index,
columns=map(str, np.arange(4)))
source_samples = ['sample1', 'sample2', 'sample3']
method = 'sum'
obs = collapse_source_data(stable, ftable, source_samples, category,
method)
exp_data = np.vstack((fdata[1, :], fdata[0, :] + fdata[2, :]))
exp_index = [0.4, 3.0]
exp = pd.DataFrame(exp_data.astype(np.int32), index=exp_index,
columns=map(str, np.arange(4)))
exp.index.name = 'collapse_col'
pd.util.testing.assert_frame_equal(obs, exp)
# Example with collapse mode 'mean'. This will cause non-integer values
# to be present, which the validate_gibbs_input should catch.
source_samples = ['sample1', 'sample2', 'sample3', 'sample4']
method = 'mean'
obs = collapse_source_data(stable, ftable, source_samples, category,
method)
exp_data = np.vstack((fdata[1, :],
fdata[[0, 2, 3], :].mean(0))).astype(np.int32)
exp_index = [0.4, 3.0]
exp = pd.DataFrame(exp_data.astype(np.int32), index=exp_index,
columns=map(str, np.arange(4)))
exp.index.name = 'collapse_col'
pd.util.testing.assert_frame_equal(obs, exp)
def test_example2(self):
# Test on another arbitrary example.
data = np.arange(200).reshape(20, 10)
oids = ['o%s' % i for i in range(20)]
sids = ['s%s' % i for i in range(10)]
ftable = pd.DataFrame(data.T, index=sids, columns=oids)
_stable = \
{'s4': {'cat1': '2', 'cat2': 'x', 'cat3': 'A', 'cat4': 'D'},
's0': {'cat1': '1', 'cat2': 'y', 'cat3': 'z', 'cat4': 'D'},
's1': {'cat1': '1', 'cat2': 'x', 'cat3': 'A', 'cat4': 'C'},
's3': {'cat1': '2', 'cat2': 'y', 'cat3': 'z', 'cat4': 'A'},
's2': {'cat1': '2', 'cat2': 'x', 'cat3': 'A', 'cat4': 'D'},
's6': {'cat1': '1', 'cat2': 'y', 'cat3': 'z', 'cat4': 'R'},
's5': {'cat1': '2', 'cat2': 'x', 'cat3': 'z', 'cat4': '0'},
's7': {'cat1': '2', 'cat2': 'x', 'cat3': 'z', 'cat4': '0'},
's9': {'cat1': '2', 'cat2': 'x', 'cat3': 'z', 'cat4': '0'},
's8': {'cat1': '2', 'cat2': 'x', 'cat3': 'z', 'cat4': '0'}}
stable = pd.DataFrame(_stable).T
category = 'cat4'
source_samples = ['s4', 's9', 's0', 's2']
method = 'sum'
obs = collapse_source_data(stable, ftable, source_samples, category,
method)
exp_index = np.array(['0', 'D'])
exp_data = np.array([[9, 19, 29, 39, 49, 59, 69, 79, 89, 99, 109, 119,
129, 139, 149, 159, 169, 179, 189, 199],
[6, 36, 66, 96, 126, 156, 186, 216, 246, 276, 306,
336, 366, 396, 426, 456, 486, 516, 546, 576]],
dtype=np.int32)
exp = pd.DataFrame(exp_data, index=exp_index, columns=oids)
exp.index.name = 'collapse_col'
pd.util.testing.assert_frame_equal(obs, exp)
class TestSubsampleDataframe(TestCase):
def test_no_errors_expected(self):
# Testing this function deterministically is hard because cython is
# generating the PRNG calls. We'll settle for ensuring that the sums
# are correct.
fdata = np.array([[10, 50, 10, 70],
[0, 25, 10, 5],
[0, 25, 10, 5],
[100, 0, 10, 5]])
ftable = pd.DataFrame(fdata, index=['s1', 's2', 's3', 's4'],
columns=map(str, np.arange(4)))
n = 30
obs = subsample_dataframe(ftable, n)
self.assertTrue((obs.sum(axis=1) == n).all())
def test_subsample_with_replacement(self):
# Testing this function deterministically is hard because cython is
# generating the PRNG calls. We'll settle for ensuring that the sums
# are correct.
fdata = np.array([[10, 50, 10, 70],
[0, 25, 10, 5],
[0, 25, 10, 5],
[100, 0, 10, 5]])
ftable = pd.DataFrame(fdata, index=['s1', 's2', 's3', 's4'],
columns=map(str, np.arange(4)))
n = 30
obs = subsample_dataframe(ftable, n, replace=True)
self.assertTrue((obs.sum(axis=1) == n).all())
def test_shape_doesnt_change(self):
# Test that when features are removed by subsampling, the shape of the
# table does not change. Although rarifaction is stochastic, the
# probability that the below table does not lose at least one feature
# during rarefaction (and thus satisfy as the test of the condition we)
# are interested in) is nearly 0.
fdata = np.array([[0, 0, 0, 1e4],
[0, 0, 1, 1e4],
[0, 1, 0, 1e4],
[1, 0, 0, 1e4]]).astype(int)
ftable = pd.DataFrame(fdata, index=['s1', 's2', 's3', 's4'],
columns=map(str, np.arange(4)))
n = 10
obs = subsample_dataframe(ftable, n)
self.assertTrue((obs.sum(axis=1) == n).all())
self.assertEqual(obs.shape, ftable.shape)
class TestDataAggregationFunctions(TestCase):
'''Test that returned data is collated and written correctly.'''
def test_cumulative_proportions(self):
# 4 draws, 4 sources + unknown, 3 sinks
sink1_envcounts = np.array([[10, 100, 15, 0, 25],
[150, 0, 0, 0, 0],
[30, 30, 30, 30, 30],
[0, 11, 7, 35, 97]])
sink2_envcounts = np.array([[100, 10, 15, 0, 25],
[100, 0, 50, 0, 0],
[0, 60, 30, 30, 30],
[7, 11, 0, 35, 97]])
sink3_envcounts = np.array([[100, 10, 10, 5, 25],
[70, 20, 30, 30, 0],
[10, 30, 50, 30, 30],
[0, 27, 100, 20, 3]])
all_envcounts = [sink1_envcounts, sink2_envcounts, sink3_envcounts]
sink_ids = np.array(['sink1', 'sink2', 'sink3'])
source_ids = np.array(['source1', 'source2', 'source3', 'source4'])
cols = list(source_ids) + ['Unknown']
prp_r1 = np.array([190, 141, 52, 65, 152]) / 600.
prp_r2 = np.array([207, 81, 95, 65, 152]) / 600.
prp_r3 = np.array([180, 87, 190, 85, 58]) / 600.
prp_data = np.vstack([prp_r1, prp_r2, prp_r3])
prp_std_data = np.zeros((3, 5), dtype=np.float64)
prp_std_data[0, 0] = (np.array([10, 150, 30, 0]) / 600.).std()
prp_std_data[0, 1] = (np.array([100, 0, 30, 11]) / 600.).std()
prp_std_data[0, 2] = (np.array([15, 0, 30, 7]) / 600.).std()
prp_std_data[0, 3] = (np.array([0, 0, 30, 35]) / 600.).std()
prp_std_data[0, 4] = (np.array([25, 0, 30, 97]) / 600.).std()
prp_std_data[1, 0] = (np.array([100, 100, 0, 7]) / 600.).std()
prp_std_data[1, 1] = (np.array([10, 0, 60, 11]) / 600.).std()
prp_std_data[1, 2] = (np.array([15, 50, 30, 0]) / 600.).std()
prp_std_data[1, 3] = (np.array([0, 0, 30, 35]) / 600.).std()
prp_std_data[1, 4] = (np.array([25, 0, 30, 97]) / 600.).std()
prp_std_data[2, 0] = (np.array([100, 70, 10, 0]) / 600.).std()
prp_std_data[2, 1] = (np.array([10, 20, 30, 27]) / 600.).std()
prp_std_data[2, 2] = (np.array([10, 30, 50, 100]) / 600.).std()
prp_std_data[2, 3] = (np.array([5, 30, 30, 20]) / 600.).std()
prp_std_data[2, 4] = (np.array([25, 0, 30, 3]) / 600.).std()
exp_prp = pd.DataFrame(prp_data, index=sink_ids, columns=cols)
exp_prp_std = pd.DataFrame(prp_std_data, index=sink_ids, columns=cols)
obs_prp, obs_prp_std = cumulative_proportions(all_envcounts, sink_ids,
source_ids)
pd.util.testing.assert_frame_equal(obs_prp, exp_prp)
pd.util.testing.assert_frame_equal(obs_prp_std, exp_prp_std)
def test_single_sink_feature_table(self):
# 4 draws, depth of sink = 10, 5 sources + Unknown.
final_env_assignments = np.array([[5, 0, 0, 0, 2, 0, 1, 0, 3, 1],
[1, 1, 3, 3, 2, 2, 1, 1, 1, 1],
[4, 1, 4, 4, 4, 4, 1, 1, 3, 2],
[2, 1, 0, 5, 5, 5, 5, 1, 0, 2]])
# notice that each row is the same - they are determined by
# `generate_taxon_sequence` before the `gibbs_sampler` runs.
final_taxon_assignments = \
np.array([[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100]])
# we are allowing more taxa than we have found in this sample, i.e. the
# largest value in `final_taxon_assignments` will be smaller than the
# largest index in the columns of the final table.
nfeatures = 1250
nsources = 5
data = np.zeros((nsources + 1, nfeatures), dtype=np.int32)
# for the purpose of this test code, I'll increment data taxa by taxa.
data[np.array([5, 1, 4, 2]), 0] += 1
data[0, 3] += 3
data[1, 3] += 3
data[3, 3] += 1
data[4, 3] += 1
data[np.array([0, 3, 4, 5]), 227] += 1
data[0, 550] += 1
data[1, 550] += 3
data[2, 550] += 3
data[4, 550] += 2
data[5, 550] += 3
data[0, 999] += 2
data[1, 999] += 4
data[3, 999] += 2
data[1, 1100] += 2
data[2, 1100] += 2
exp_sources = ['source%s' % i for i in range(nsources)] + ['Unknown']
feature_ids = ['f%s' % i for i in range(1250)]
exp = pd.DataFrame(data, index=exp_sources, columns=feature_ids)
source_ids = np.array(['source%s' % i for i in range(nsources)])
obs = single_sink_feature_table(final_env_assignments,
final_taxon_assignments, source_ids,
feature_ids)
pd.util.testing.assert_frame_equal(obs, exp)
def test_collate_gibbs_results(self):
# We'll vary the depth of the sinks - simulating a situation where the
# user has not rarefied.
# We'll set:
# draws = 4
# sink_depths = [10, 15, 7]
# sources = 5 (+1 unknown)
final_env_counts_sink1 = np.array([[5, 2, 1, 1, 0, 1],
[0, 6, 2, 2, 0, 0],
[0, 3, 1, 1, 5, 0],
[2, 2, 2, 0, 0, 4]])
final_env_assignments_sink1 = \
np.array([[5, 0, 0, 0, 2, 0, 1, 0, 3, 1],
[1, 1, 3, 3, 2, 2, 1, 1, 1, 1],
[4, 1, 4, 4, 4, 4, 1, 1, 3, 2],
[2, 1, 0, 5, 5, 5, 5, 1, 0, 2]])
final_taxon_assignments_sink1 = \
np.array([[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100]])
final_env_counts_sink2 = np.array([[5, 1, 3, 2, 0, 4],
[1, 1, 4, 5, 1, 3],
[4, 1, 3, 2, 3, 2],
[2, 3, 3, 2, 1, 4]])
final_env_assignments_sink2 = \
np.array([[2, 5, 0, 5, 1, 5, 0, 0, 3, 0, 3, 5, 2, 2, 0],
[3, 2, 2, 3, 2, 3, 3, 5, 5, 1, 3, 4, 2, 0, 5],
[0, 2, 3, 2, 0, 0, 2, 4, 5, 4, 0, 5, 3, 1, 4],
[4, 3, 2, 1, 2, 5, 3, 5, 2, 0, 1, 0, 5, 1, 5]])
final_taxon_assignments_sink2 = \
np.array([[7, 7, 7, 7, 8, 8, 8, 8, 250, 250, 250, 250, 1249, 1249],
[7, 7, 7, 7, 8, 8, 8, 8, 250, 250, 250, 250, 1249, 1249],
[7, 7, 7, 7, 8, 8, 8, 8, 250, 250, 250, 250, 1249, 1249],
[7, 7, 7, 7, 8, 8, 8, 8, 250, 250, 250, 250, 1249, 1249]])
final_env_counts_sink3 = np.array([[4, 2, 0, 0, 1, 0],
[0, 3, 1, 0, 2, 1],
[0, 0, 1, 1, 3, 2],
[2, 1, 0, 3, 0, 1]])
final_env_assignments_sink3 = \
np.array([[4, 0, 0, 0, 1, 0, 1],
[1, 2, 1, 4, 5, 4, 1],
[4, 3, 5, 4, 4, 5, 2],
[3, 0, 1, 3, 3, 0, 5]])
final_taxon_assignments_sink3 = \
np.array([[3, 865, 865, 1100, 1100, 1100, 1249],
[3, 865, 865, 1100, 1100, 1100, 1249],
[3, 865, 865, 1100, 1100, 1100, 1249],
[3, 865, 865, 1100, 1100, 1100, 1249]])
# Create expected proportion data.
prp_data = np.zeros((3, 6), dtype=np.float64)
prp_std_data = np.zeros((3, 6), dtype=np.float64)
prp_data[0] = (final_env_counts_sink1.sum(0) /
final_env_counts_sink1.sum())
prp_data[1] = (final_env_counts_sink2.sum(0) /
final_env_counts_sink2.sum())
prp_data[2] = (final_env_counts_sink3.sum(0) /
final_env_counts_sink3.sum())
prp_std_data[0] = \
(final_env_counts_sink1 / final_env_counts_sink1.sum()).std(0)
prp_std_data[1] = \
(final_env_counts_sink2 / final_env_counts_sink2.sum()).std(0)
prp_std_data[2] = \
(final_env_counts_sink3 / final_env_counts_sink3.sum()).std(0)
sink_ids = ['sink1', 'sink2', 'sink3']
exp_sources = ['source%s' % i for i in range(5)] + ['Unknown']
feature_ids = ['f%s' % i for i in range(1250)]
exp_prp = pd.DataFrame(prp_data, index=sink_ids, columns=exp_sources)
exp_prp_std = pd.DataFrame(prp_std_data, index=sink_ids,
columns=exp_sources)
# Create expected feature table data.
ft1 = np.zeros((6, 1250), dtype=np.int32)
for r, c in zip(final_env_assignments_sink1.ravel(),
final_taxon_assignments_sink1.ravel()):
ft1[r, c] += 1
exp_ft1 = pd.DataFrame(ft1, index=exp_sources, columns=feature_ids)
ft2 = np.zeros((6, 1250), dtype=np.int32)
for r, c in zip(final_env_assignments_sink2.ravel(),
final_taxon_assignments_sink2.ravel()):
ft2[r, c] += 1
exp_ft2 = pd.DataFrame(ft2, index=exp_sources, columns=feature_ids)
ft3 = np.zeros((6, 1250), dtype=np.int32)
for r, c in zip(final_env_assignments_sink3.ravel(),
final_taxon_assignments_sink3.ravel()):
ft3[r, c] += 1
exp_ft3 = pd.DataFrame(ft3, index=exp_sources, columns=feature_ids)
exp_fts = [exp_ft1, exp_ft2, exp_ft3]
# Prepare the inputs for passing to collate_gibbs_results
all_envcounts = [final_env_counts_sink1, final_env_counts_sink2,
final_env_counts_sink3]
all_env_assignments = [final_env_assignments_sink1,
final_env_assignments_sink2,
final_env_assignments_sink3]
all_taxon_assignments = [final_taxon_assignments_sink1,
final_taxon_assignments_sink2,
final_taxon_assignments_sink3]
# Test when create_feature_tables=True
obs_prp, obs_prp_std, obs_fts = \
collate_gibbs_results(all_envcounts, all_env_assignments,
all_taxon_assignments, np.array(sink_ids),
np.array(exp_sources[:-1]),
np.array(feature_ids),
create_feature_tables=True, loo=False)
pd.util.testing.assert_frame_equal(obs_prp, exp_prp)
pd.util.testing.assert_frame_equal(obs_prp_std, exp_prp_std)
for i in range(3):
pd.util.testing.assert_frame_equal(obs_fts[i], exp_fts[i])
# Test when create_feature_tables=False
obs_prp, obs_prp_std, obs_fts = \
collate_gibbs_results(all_envcounts, all_env_assignments,
all_taxon_assignments, np.array(sink_ids),
np.array(exp_sources[:-1]),
np.array(feature_ids),
create_feature_tables=False, loo=False)
self.assertTrue(obs_fts is None)
def test_collate_gibbs_results_loo(self):
# We'll vary the depth of the sources - simulating a situation where
# the user has not rarefied.
# We'll set:
# draws = 2
# source_depths = [7, 4, 5]
# sources = 3 (+1 Unknown)
ec1 = np.array([[6, 0, 1],
[2, 2, 3]])
ea1 = np.array([[0, 2, 0, 0, 0, 0, 0],
[0, 1, 0, 2, 1, 2, 2]])
ta1 = np.array([[2, 2, 2, 4, 4, 4, 6],
[2, 2, 2, 4, 4, 4, 6]])
ec2 = np.array([[1, 2, 1],
[2, 2, 0]])
ea2 = np.array([[0, 1, 2, 1],
[0, 1, 1, 0]])
ta2 = np.array([[3, 3, 3, 3],
[3, 3, 3, 3]])
ec3 = np.array([[1, 2, 2],
[4, 0, 1]])
ea3 = np.array([[1, 1, 0, 2, 2],
[0, 0, 0, 0, 2]])
ta3 = np.array([[3, 3, 4, 5, 5],
[3, 3, 4, 5, 5]])
# Create expected proportion data.
prp_data = np.array([[0, 8/14., 2/14., 4/14.],
[3/8., 0, 4/8., 1/8.],
[5/10., 2/10., 0, 3/10.]], dtype=np.float64)
prp_std_data = np.zeros((3, 4), dtype=np.float64)
prp_std_data[0, 1:] = (ec1 / ec1.sum()).std(0)
prp_std_data[1, np.array([0, 2, 3])] = (ec2 / ec2.sum()).std(0)
prp_std_data[2, np.array([0, 1, 3])] = (ec3 / ec3.sum()).std(0)
exp_sources = ['source%s' % i for i in range(3)] + ['Unknown']
feature_ids = ['f%s' % i for i in range(7)]
exp_prp = pd.DataFrame(prp_data, index=exp_sources[:-1],
columns=exp_sources)
exp_prp_std = pd.DataFrame(prp_std_data, index=exp_sources[:-1],
columns=exp_sources)
# Create expected feature table data.
ft1 = np.array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 4, 0, 3, 0, 1],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 2, 0, 1]], dtype=np.int64)
ft2 = np.array([[0, 0, 0, 3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 4, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0]], dtype=np.int64)
ft3 = np.array([[0, 0, 0, 2, 2, 1, 0],
[0, 0, 0, 2, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 3, 0]], dtype=np.int64)
exp_fts = [pd.DataFrame(ft1, index=exp_sources, columns=feature_ids),
pd.DataFrame(ft2, index=exp_sources, columns=feature_ids),
pd.DataFrame(ft3, index=exp_sources, columns=feature_ids)]
# Prepare the inputs for passing to collate_gibbs_results
all_envcounts = [ec1, ec2, ec3]
all_env_assignments = [ea1, ea2, ea3]
all_taxon_assignments = [ta1, ta2, ta3]
# Test when create_feature_tables=True
obs_prp, obs_prp_std, obs_fts = \
collate_gibbs_results(all_envcounts, all_env_assignments,
all_taxon_assignments,
np.array(exp_sources[:-1]),
np.array(exp_sources[:-1]),
np.array(feature_ids),
create_feature_tables=True, loo=True)
pd.util.testing.assert_frame_equal(obs_prp, exp_prp)
pd.util.testing.assert_frame_equal(obs_prp_std, exp_prp_std)
for i in range(3):
pd.util.testing.assert_frame_equal(obs_fts[i], exp_fts[i])
# Test when create_feature_tables=False
obs_prp, obs_prp_std, obs_fts = \
collate_gibbs_results(all_envcounts, all_env_assignments,
all_taxon_assignments,
np.array(exp_sources[:-1]),
np.array(exp_sources[:-1]),
np.array(feature_ids),
create_feature_tables=False, loo=True)
self.assertTrue(obs_fts is None)
class TestBookkeeping(TestCase):
'''Tests for fnxs which generate bookkeeping data for `gibbs_sampler`.'''
def test_generate_environment_assignment(self):
np.random.seed(235234234)
obs_sea, obs_ecs = generate_environment_assignments(100, 10)
exp_sea = \
np.array([7, 3, 4, 1, 5, 2, 6, 3, 6, 4, 4, 7, 8, 2, 7, 7, 9, 9, 4,
7, 0, 3, 6, 5, 7, 2, 7, 1, 2, 4, 1, 7, 0, 7, 5, 2, 8, 5,
3, 3, 1, 4, 3, 3, 8, 7, 7, 5, 2, 6, 0, 2, 4, 0, 0, 5, 9,
8, 2, 8, 9, 9, 8, 7, 5, 8, 0, 9, 8, 6, 3, 2, 3, 7, 3, 8,
4, 4, 9, 1, 6, 6, 0, 9, 2, 9, 9, 4, 2, 9, 0, 4, 1, 3, 4,
0, 0, 9, 8, 3])
exp_ecs = np.array([10, 6, 11, 12, 12, 7, 7, 13, 10, 12])
np.testing.assert_array_equal(obs_sea, exp_sea)
np.testing.assert_array_equal(obs_ecs, exp_ecs)
class ConditionalProbabilityTests(TestCase):
'''Unit test for the ConditionalProbability class.'''
def setUp(self):
# create an object we can reuse for several of the tests
self.alpha1 = .5
self.alpha2 = .001
self.beta = 10
self.source_data = np.array([[0, 0, 0, 100, 100, 100],
[100, 100, 100, 0, 0, 0]])
self.cp = ConditionalProbability(self.alpha1, self.alpha2, self.beta,
self.source_data)
def test_init(self):
exp_alpha1 = self.alpha1
exp_alpha2 = self.alpha2
exp_beta = self.beta
exp_m_xivs = self.source_data
exp_m_vs = np.array([[300], [300]])
exp_V = 3
exp_tau = 6
exp_joint_probability = np.array([0, 0, 0])
self.assertEqual(self.cp.alpha1, exp_alpha1)
self.assertEqual(self.cp.alpha2, exp_alpha2)
self.assertEqual(self.cp.beta, exp_beta)
np.testing.assert_array_equal(self.cp.m_xivs, exp_m_xivs)
np.testing.assert_array_equal(self.cp.m_vs, exp_m_vs)
self.assertEqual(self.cp.V, exp_V)
self.assertEqual(self.cp.tau, exp_tau)
np.testing.assert_array_equal(self.cp.joint_probability,
exp_joint_probability)
def test_set_n(self):
self.cp.set_n(500)
self.assertEqual(self.cp.n, 500)
def test_precalculate(self):
alpha1 = .01
alpha2 = .3
beta = 35
source_data = np.array([[10, 5, 2, 100],
[0, 76, 7, 3],
[9, 5, 0, 0],
[0, 38, 11, 401]])
cp = ConditionalProbability(alpha1, alpha2, beta, source_data)
n = 1300
cp.set_n(n)
cp.precalculate()
# Calculated by hand.
exp_known_p_tv = np.array(
[[.085526316, .042805878, .017173636, .85449419],
[.000116225, .883426313, .081473733, .034983728],
[.641737892, .356837607, .000712251, .000712251],
[.00002222, .084459159, .024464492, .891054129]])
exp_denominator_p_v = 1299 + 35 * 5
exp_known_source_cp = exp_known_p_tv / exp_denominator_p_v
exp_alpha2_n = 390
exp_alpha2_n_tau = 1560
self.assertEqual(cp.denominator_p_v, exp_denominator_p_v)
self.assertEqual(cp.alpha2_n, exp_alpha2_n)
self.assertEqual(cp.alpha2_n_tau, exp_alpha2_n_tau)
np.testing.assert_array_almost_equal(cp.known_p_tv, exp_known_p_tv)
np.testing.assert_array_almost_equal(cp.known_source_cp,
exp_known_source_cp)
def test_calculate_cp_slice(self):
# test with non overlapping two component mixture.
n = 500
self.cp.set_n(n)
self.cp.precalculate()
n_vnoti = np.array([305, 1, 193])
m_xiVs = np.array([25, 30, 29, 10, 60, 39])
m_V = 193 # == m_xiVs.sum() == n_vnoti[2]
# Calculated by hand.
exp_jp_array = np.array(
[[9.82612e-4, 9.82612e-4, 9.82612e-4, .1975051, .1975051,
.1975051],
[6.897003e-3, 6.897003e-3, 6.897003e-3, 3.4313e-5, 3.4313e-5,
3.4313e-5],
[.049925736, .059715096, .057757224, .020557656, .118451256,
.077335944]])
obs_jp_array = np.zeros((3, 6))
for i in range(6):
obs_jp_array[:, i] = self.cp.calculate_cp_slice(i, m_xiVs[i], m_V,
n_vnoti)
np.testing.assert_array_almost_equal(obs_jp_array, exp_jp_array)
# Test using Dan's R code and some print statements. Using the same
# data as specified in setup.
# Print statesments are added starting at line 347 of SourceTracker.r.
# The output is being used to compare the p_tv * p_v calculation that
# we are making. Used the following print statements:
# print(sink)
# print(taxon)
# print(sources)
# print(rowSums(sources))
# print(envcounts)
# print(p_v_denominator)
# print('')
# print(p_tv)
# print(p_v)
# print(p_tv * p_v)
# Results of print statements
# [1] 6
# [1] 100 100 100 100 100 100
# otu_1 otu_2 otu_3 otu_4 otu_5 otu_6
# Source_1 0.5 0.5 0.5 100.5 100.5 100.5
# Source_2 100.5 100.5 100.5 0.5 0.5 0.5
# Unknown 36.6 29.6 29.6 37.6 26.6 31.6
# Source_1 Source_2 Unknown
# 303.0 303.0 191.6
# [1] 213 218 198
# [1] 629
# [1] ""
# Source_1 Source_2 Unknown
# 0.331683168 0.001650165 0.164926931
# [1] 0.3386328 0.3465819 0.3147854
# Source_1 Source_2 Unknown
# 0.1123187835 0.0005719173 0.0519165856
# The sink is the sum of the source data, self.source_data.sum(1).
cp = ConditionalProbability(self.alpha1, self.alpha2, self.beta,
self.source_data)
cp.set_n(600)
cp.precalculate()
# Taxon selected by R was 6, but R is 1-indexed and python is
# 0-indexed.
taxon_index = 5
# Must subtract alpha2 * tau * n from the Unknown sum since the R
# script adds these values to the 'Sources' matrix.
unknown_sum = 188
unknown_at_t5 = 31
# Must subtract beta from each envcount because the R script adds this
# value to the 'envcounts' matrix.
envcounts = np.array([203, 208, 188])
obs_jp = cp.calculate_cp_slice(taxon_index, unknown_at_t5, unknown_sum,
envcounts)
# From the final line of R results above.
exp_jp = np.array([0.1123187835, 0.0005719173, 0.0519165856])
np.testing.assert_array_almost_equal(obs_jp, exp_jp)
class TestGibbs(TestCase):
'''Unit tests for Gibbs based on seeding the PRNG and hand calculations.'''
def test_single_pass_gibbs_sampler(self):
# The data for this test was built by seeding the PRNG, and making the
# calculations that Gibb's would make, and then comparing the results.
restarts = 1
draws_per_restart = 1
burnin = 0
# Setting delay to 2 is the only way to stop the Sampler after a single
# pass.
delay = 2
alpha1 = .2
alpha2 = .1
beta = 3
source_data = np.array([[0, 1, 4, 10],
[3, 2, 1, 1]])
sink = np.array([2, 1, 4, 2])
# Make calculations using gibbs function.
np.random.seed(0)
cp = ConditionalProbability(alpha1, alpha2, beta, source_data)
obs_ec, obs_ea, obs_ta = gibbs_sampler(sink, cp, restarts,
draws_per_restart, burnin,
delay)
# Make calculation using handrolled.
np.random.seed(0)
choices = np.arange(3)
np.random.choice(choices, size=9, replace=True)
order = np.arange(9)
np.random.shuffle(order)
expected_et_pairs = np.array([[2, 0, 1, 2, 0, 1, 0, 1, 0],
[3, 2, 2, 2, 0, 0, 1, 2, 3]])
envcounts = np.array([4., 3., 2.])
unknown_vector = np.array([0, 0, 1, 1])
# Calculate known probabilty base as ConditionalProbability would.
denominator = np.array([[(15 + (4*.2)) * (8 + 3*3)],
[(7 + (4*.2)) * (8 + 3*3)]])
numerator = np.array([[0, 1, 4, 10],
[3, 2, 1, 1]]) + .2
known_env_prob_base = numerator / denominator
# Set up a sequence environment assignments vector. This would normally
# be handled by the Sampler class.
seq_env_assignments = np.zeros(9)
# Set up joint probability holder, normally handeled by
# ConditionalProbability class.
joint_prob = np.zeros(3)
for i, (e, t) in enumerate(expected_et_pairs.T):
envcounts[e] -= 1
if e == 2:
unknown_vector[t] -= 1
# Calculate the new probabilty as ConditionalProbability would.
joint_prob = np.zeros(3)
joint_prob[:-1] += envcounts[:-1] + beta
joint_prob[:-1] = joint_prob[:-1] * known_env_prob_base[:2, t]
joint_prob[-1] = (unknown_vector[t] + (9 * .1)) / \
(unknown_vector.sum() + (9 * .1 * 4))
joint_prob[-1] *= ((envcounts[2] + beta) / (8 + 3*3))
# Another call to the PRNG
new_e = np.random.choice(np.array([0, 1, 2]),
p=joint_prob/joint_prob.sum())
seq_env_assignments[i] = new_e
envcounts[new_e] += 1
if new_e == 2:
unknown_vector[t] += 1
# prps = envcounts / float(envcounts.sum())
# exp_mps = prps/prps.sum()
# Create taxon table like Sampler class would.
exp_ct = np.zeros((4, 3))
for i in range(9):
exp_ct[expected_et_pairs[1, i],
np.int(seq_env_assignments[i])] += 1
# np.testing.assert_array_almost_equal(obs_mps.squeeze(), exp_mps)
# np.testing.assert_array_equal(obs_ct.squeeze().T, exp_ct)
np.testing.assert_array_equal(obs_ec.squeeze(), envcounts)
np.testing.assert_array_equal(obs_ea.squeeze()[order],
seq_env_assignments)
np.testing.assert_array_equal(obs_ta.squeeze()[order],
expected_et_pairs[1, :])
def test_gibbs_params_bad(self):
# test gibbs when the parameters passed are bad
features = ['o1', 'o2', 'o3', 'o4', 'o5', 'o6']
source1 = np.array([10, 10, 10, 0, 0, 0])
source2 = np.array([0, 0, 0, 10, 10, 10])
sources = pd.DataFrame(np.vstack((source1, source2)).astype(np.int32),
index=['source1', 'source2'], columns=features)
self.assertRaises(ValueError, gibbs, sources, alpha1=-.3)
def test_gibbs_data_bad(self):
# input has nans.
features = ['o1', 'o2', 'o3', 'o4', 'o5', 'o6']
source1 = np.array([10, 10, 10, 0, 0, np.nan])
source2 = np.array([0, 0, 0, 10, 10, 10])
sources = pd.DataFrame(np.vstack((source1, source2)),
index=['source1', 'source2'], columns=features)
self.assertRaises(ValueError, gibbs, sources)
# features do not overlap.
features = ['o1', 'o2', 'o3', 'o4', 'o5', 'o6']
source1 = np.array([10, 10, 10, 0, 0, 0])
source2 = np.array([0, 0, 0, 10, 10, 10])
sources = pd.DataFrame(np.vstack((source1, source2)),
index=['source1', 'source2'], columns=features)
features2 = ['o1', 'asdsadO2', 'o3', 'o4', 'o5', 'o6']
sink1 = np.array([10, 10, 10, 0, 0, 0])
sink2 = np.array([0, 0, 0, 10, 10, 10])
sinks = pd.DataFrame(np.vstack((sink1, sink2)),
index=['sink1', 'sink2'], columns=features2)
self.assertRaises(ValueError, gibbs, sources, sinks)
# there are negative counts.
sources.iloc[0, 2] = -10
self.assertRaises(ValueError, gibbs, sources)
# non-real data in input dataframe.
# copied from test of `validate_gibbs_input`.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32),
index=['f%s' % i for i in range(5)],
columns=['s%s' % i for i in range(4)])
sources.iloc[2, 2] = '3.a'
self.assertRaises(ValueError, validate_gibbs_input, sources)
def test_consistency_when_gibbs_seeded(self):
'''Test consistency of `gibbs` (without LOO) from run to run.
Notes
-----
The number of calls to the PRNG should be stable (and thus this test,
which is seeded, should not fail). Any changes made to the code which
cause this test to fail should be scrutinized very carefully.
If the number of calls to the PRNG has not been changed, then an error
has been introduced somewhere else in the code. If the number of calls
has been changed, the deterministic tests should fail as well, but
since they are a small example they might not fail (false negative).
This test is extensive (it does 201 loops through the entire
`gibbs_sampler` block).
'''
features = ['o1', 'o2', 'o3', 'o4', 'o5', 'o6']
source1 = np.array([10, 10, 10, 0, 0, 0])
source2 = np.array([0, 0, 0, 10, 10, 10])
sink1 = .5*source1 + .5*source2
sinks = pd.DataFrame(sink1.reshape(1, 6).astype(np.int32),
index=['sink1'], columns=features)
sources = pd.DataFrame(np.vstack((source1, source2)).astype(np.int32),
index=['source1', 'source2'], columns=features)
np.random.seed(1042)
mpm, mps, fts = gibbs(sources, sinks, alpha1=.001, alpha2=.01, beta=1,
restarts=3, draws_per_restart=5, burnin=50,
jobs=2, delay=4, create_feature_tables=True)
possible_sources = ['source1', 'source2', 'Unknown']
vals = np.array([[0.44, 0.44666667, 0.11333333]])
exp_mpm = pd.DataFrame(vals, index=['sink1'], columns=possible_sources)
vals = np.array([[0.00824322, 0.00435465, 0.01047985]])
exp_mps = pd.DataFrame(vals, index=['sink1'], columns=possible_sources)
vals = np.array([[69, 64, 65, 0, 0, 0],
[0, 0, 0, 67, 70, 64],
[6, 11, 10, 8, 5, 11]], dtype=np.int32)
exp_fts = pd.DataFrame(vals, index=possible_sources, columns=features)
pd.util.testing.assert_frame_equal(mpm, exp_mpm)
pd.util.testing.assert_frame_equal(mps, exp_mps)
pd.util.testing.assert_frame_equal(fts[0], exp_fts)
def test_consistency_when_gibbs_loo_seeded(self):
'''Test consistency of `gibbs` (loo) from run to run.
Notes
-----
The number of calls to the PRNG should be stable (and thus this test,
which is seeded, should not fail). Any changes made to the code which
cause this test to fail should be scrutinized very carefully.
If the number of calls to the PRNG has not been changed, then an error
has been introduced somewhere else in the code. If the number of calls
has been changed, the deterministic tests should fail as well, but
since they are a small example they might not fail (false negative).
This test is extensive (it does 201 loops through the entire
`gibbs_sampler` block for each source).
'''
source1a = np.array([10, 10, 10, 0, 0, 0])
source1b = np.array([8, 8, 8, 2, 2, 2])
source2a = np.array([0, 0, 0, 10, 10, 10])
source2b = np.array([4, 4, 4, 6, 6, 6])
vals = np.vstack((source1a, source1b, source2a,
source2b)).astype(np.int32)
source_names = ['source1a', 'source1b', 'source2a', 'source2b']
feature_names = ['o1', 'o2', 'o3', 'o4', 'o5', 'o6']
sources = pd.DataFrame(vals, index=source_names, columns=feature_names)
np.random.seed(1042)
obs_mpm, obs_mps, obs_fts = gibbs(sources, sinks=None, alpha1=.001,
alpha2=.01, beta=1, restarts=3,
draws_per_restart=5, burnin=50,
delay=4, create_feature_tables=True)
vals = np.array([[0., 0.62444444, 0., 0.01555556, 0.36],
[0.68444444, 0., 0.09333333, 0.12666667, 0.09555556],
[0., 0.00888889, 0., 0.08222222, 0.90888889],
[0.19111111, 0.2, 0.5, 0., 0.10888889]])
exp_mpm = pd.DataFrame(vals, index=source_names,
columns=source_names + ['Unknown'])
vals = np.array([[0., 0.02406393, 0., 0.0015956, 0.02445387],
[0.0076923, 0., 0.00399176, 0.00824322, 0.00648476],
[0., 0.00127442, 0., 0.00622575, 0.00609752],
[0.00636175, 0.00786721, 0.00525874, 0., 0.00609752]])
exp_mps = pd.DataFrame(vals, index=source_names,
columns=source_names + ['Unknown'])
fts0_vals = np.array([[0, 0, 0, 0, 0, 0],
[93, 87, 101, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[3, 4, 0, 0, 0, 0],
[54, 59, 49, 0, 0, 0]])
fts1_vals = np.array([[113, 98, 97, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 15, 13, 14],
[5, 7, 11, 11, 12, 11],
[2, 15, 12, 4, 5, 5]])
fts2_vals = np.array([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 2, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 12, 12, 13],
[0, 0, 0, 136, 137, 136]])
fts3_vals = np.array([[28, 27, 31, 0, 0, 0],
[27, 24, 25, 3, 4, 7],
[0, 0, 0, 80, 71, 74],
[0, 0, 0, 0, 0, 0],
[5, 9, 4, 7, 15, 9]])
fts_vals = [fts0_vals, fts1_vals, fts2_vals, fts3_vals]
exp_fts = [pd.DataFrame(vals, index=source_names + ['Unknown'],
columns=feature_names) for vals in fts_vals]
pd.util.testing.assert_frame_equal(obs_mpm, exp_mpm)
pd.util.testing.assert_frame_equal(obs_mps, exp_mps)
for obs_fts, exp_fts in zip(obs_fts, exp_fts):
pd.util.testing.assert_frame_equal(obs_fts, exp_fts)
def test_gibbs_close_to_sourcetracker_1(self):
'''This test is stochastic; occasional errors might occur.
Notes
-----
This tests against the R-code SourceTracker version 1.0, using
R version 2.15.3.
'''
sources_data = \
np.array([[0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 19, 0, 0, 0, 0, 0]],
dtype=np.int32)
sources_names = ['source1', 'source2', 'source3']
feature_names = ['f%i' % i for i in range(32)]
sources = pd.DataFrame(sources_data, index=sources_names,
columns=feature_names)
sinks_data = np.array([[0, 0, 0, 0, 0, 0, 170, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 385, 0, 0, 0, 0, 0, 0, 0, 350, 0, 0,
0, 0, 95],
[0, 0, 0, 0, 0, 0, 170, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 380, 0, 0, 0, 0, 0, 0, 0, 350, 0, 0,
0, 0, 100],
[0, 0, 0, 0, 0, 0, 170, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 378, 0, 0, 0, 0, 0, 0, 0, 350, 0, 0,
0, 0, 102],
[0, 0, 0, 0, 0, 0, 170, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 386, 0, 0, 0, 0, 0, 0, 0, 350, 0, 0,
0, 0, 94]], dtype=np.int32)
sinks_names = ['sink1', 'sink2', 'sink3', 'sink4']
sinks = pd.DataFrame(sinks_data, index=sinks_names,
columns=feature_names)
obs_mpm, obs_mps, _ = gibbs(sources, sinks, alpha1=.001, alpha2=.1,
beta=10, restarts=2, draws_per_restart=2,
burnin=5, delay=2,
create_feature_tables=False)
exp_vals = np.array([[0.1695, 0.4781, 0.3497, 0.0027],
[0.1695, 0.4794, 0.3497, 0.0014],
[0.1693, 0.4784, 0.3499, 0.0024],
[0.1696, 0.4788, 0.3494, 0.0022]])
exp_mpm = pd.DataFrame(exp_vals, index=sinks_names,
columns=sources_names + ['Unknown'])
pd.util.testing.assert_index_equal(obs_mpm.index, exp_mpm.index)
pd.util.testing.assert_index_equal(obs_mpm.columns, exp_mpm.columns)
np.testing.assert_allclose(obs_mpm.values, exp_mpm.values, atol=.01)
class PlotHeatmapTests(TestCase):
def setUp(self):
vals = np.array([[0., 0.62444444, 0., 0.01555556, 0.36],
[0.68444444, 0., 0.09333333, 0.12666667, 0.09555556],
[0., 0.00888889, 0., 0.08222222, 0.90888889],
[0.19111111, 0.2, 0.5, 0., 0.10888889]])
source_names = ['source1a', 'source1b', 'source2a', 'source2b']
self.mpm = pd.DataFrame(vals, index=source_names,
columns=source_names + ['Unknown'])
def test_defaults(self):
# plot_heatmap call returns successfully
fig, ax = plot_heatmap(self.mpm)
def test_non_defaults(self):
# plot_heatmap call returns successfully
fig, ax = plot_heatmap(self.mpm, cm=plt.cm.jet,
xlabel='Other 1', ylabel='Other 2',
title='Other 3')
if __name__ == '__main__':
main()
| bsd-3-clause | -7,534,571,585,709,418,000 | 45.5 | 79 | 0.49133 | false | 3.320111 | true | false | false |
DanyC97/algorithms | algorithms/searching/depth_first_search.py | 5 | 1076 | """
Depth First Search
------------------
Recursive implementation of the depth first search algorithm used to
traverse trees or graphs. Starts at a selected node (root) and explores the
branch as far as possible before backtracking.
Time Complexity: O(E + V)
E = Number of edges
V = Number of vertices (nodes)
Pseudocode: https://en.wikipedia.org/wiki/Depth-first_search
"""
def dfs(graph, start, path=[]):
"""
Depth first search that recursively searches the path. Backtracking occurs
only when the last node in the path is visited.
:param graph: A dictionary of nodes and edges.
:param start: The node to start the recursive search with.
:param path: A list of edges to search.
:rtype: A boolean indicating whether the node is included in the path.
"""
if start not in graph or graph[start] is None or graph[start] == []:
return None
path = path + [start]
for edge in graph[start]:
if edge not in path:
path = dfs(graph, edge, path)
return path
| bsd-3-clause | 6,116,533,381,887,268,000 | 29.742857 | 79 | 0.648699 | false | 4.203125 | false | false | false |
google/starthinker | starthinker/task/traffic/test.py | 1 | 1495 | ###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
from starthinker.util.google_api import API_DCM
from starthinker.util.cm import get_profile_for_api
def bulkdozer_test(config, task):
print('testing bulkdozer')
if 'verify' in task['traffic']:
is_admin, profile_id = get_profile_for_api(
config, task['auth'], task['traffic']['account_id'])
for entity in task['traffic']['verify']:
service = getattr(
API_DCM(config, task['auth'], internal=is_admin), entity['type'])
cm_entity = service().get(profileId=profile_id, id=entity['id']).execute()
values = entity['values']
for key in values:
if values[key] != cm_entity[key]:
raise ValueError('%s %s expected to be %s, was %s' % (entity['type'], key, values[key], cm_entity[key]))
| apache-2.0 | 3,856,234,003,942,590,000 | 37.333333 | 114 | 0.613378 | false | 3.986667 | false | false | false |
Catch-up-TV-and-More/plugin.video.catchuptvandmore | resources/lib/websites/lesargonautes.py | 1 | 2870 | # -*- coding: utf-8 -*-
# Copyright: (c) 2018, SylvainCecchetto
# GNU General Public License v2.0+ (see LICENSE.txt or https://www.gnu.org/licenses/gpl-2.0.txt)
# This file is part of Catch-up TV & More
from __future__ import unicode_literals
import json
import re
from codequick import Listitem, Resolver, Route
import urlquick
from resources.lib import download
from resources.lib.menu_utils import item_post_treatment
# TO DO
# Fix Download Mode
URL_ROOT = 'http://lesargonautes.telequebec.tv'
URL_VIDEOS = URL_ROOT + '/Episodes'
URL_STREAM_DATAS = 'https://mnmedias.api.telequebec.tv/api/v2/media/mediaUid/%s'
URL_STREAM = 'https://mnmedias.api.telequebec.tv/m3u8/%s.m3u8'
# VideoId
@Route.register
def website_root(plugin, item_id, **kwargs):
"""Add modes in the listing"""
resp = urlquick.get(URL_VIDEOS)
list_seasons_datas = re.compile(r'li path\=\"(.*?)\"').findall(resp.text)
for season_datas in list_seasons_datas:
season_title = season_datas
item = Listitem()
item.label = season_title
item.set_callback(list_videos,
item_id=item_id,
season_title=season_title)
item_post_treatment(item)
yield item
@Route.register
def list_videos(plugin, item_id, season_title, **kwargs):
resp = urlquick.get(URL_VIDEOS)
root = resp.parse("li", attrs={"path": season_title})
for video_datas in root.iterfind(".//li[@class='episode']"):
video_title = video_datas.find(".//div[@class='title']").text.strip(
) + ' - Episode ' + video_datas.find(
".//span[@path='Number']").text.strip()
video_image = video_datas.find(".//img[@class='screen']").get('src')
video_plot = video_datas.find(".//div[@class='summary']").text.strip()
video_id = video_datas.find(".//input[@path='MediaUid']").get('value')
item = Listitem()
item.label = video_title
item.art['thumb'] = item.art['landscape'] = video_image
item.info['plot'] = video_plot
item.set_callback(get_video_url,
item_id=item_id,
video_id=video_id)
item_post_treatment(item, is_playable=True, is_downloadable=True)
yield item
@Resolver.register
def get_video_url(plugin,
item_id,
video_id,
download_mode=False,
**kwargs):
"""Get video URL and start video player"""
if video_id == '':
plugin.notify('ERROR', plugin.localize(30716))
return False
resp = urlquick.get(URL_STREAM_DATAS % video_id, verify=False)
json_parser = json.loads(resp.text)
final_video_url = URL_STREAM % json_parser['media']['mediaId']
if download_mode:
return download.download_video(final_video_url)
return final_video_url
| gpl-2.0 | 2,377,813,101,347,847,000 | 29.860215 | 96 | 0.61324 | false | 3.364596 | false | false | false |
south-coast-science/scs_core | tests/aqcsv/specification/country_test.py | 1 | 1100 | #!/usr/bin/env python3
"""
Created on 4 Mar 2019
@author: Bruno Beloff ([email protected])
"""
from scs_core.aqcsv.specification.country_iso import CountryISO
from scs_core.aqcsv.specification.country_numeric import CountryNumeric
from scs_core.data.json import JSONify
# --------------------------------------------------------------------------------------------------------------------
print("list ISO...")
for country in CountryISO.instances():
print(country)
print("-")
print("find ISO...")
iso = "TUN"
iso_country = CountryISO.instance(iso)
print("iso:%s country:%s" % (iso, iso_country))
print(JSONify.dumps(iso_country))
print("-")
print("list Numeric...")
for country in CountryNumeric.instances():
print(country)
print("-")
print("find Numeric...")
numeric = 788
numeric_country = CountryNumeric.instance(numeric)
print("numeric:%s country:%s" % (numeric, numeric_country))
print(JSONify.dumps(numeric_country))
print("-")
print("equality...")
equality = iso_country == numeric_country
print("iso_country == numeric_country: %s" % equality)
print("-")
| mit | 8,914,476,633,248,161,000 | 22.913043 | 118 | 0.640909 | false | 3.548387 | false | false | false |
varun-suresh/Clustering | evaluation.py | 1 | 1895 | # Script to evaluate the performance of the clustering algorithm.
import argparse
from itertools import combinations
from collections import defaultdict
def count_correct_pairs(cluster, labels_lookup):
"""
Given a cluster, count the number of pairs belong to the same label and
the total number of pairs.
"""
total_pairs = 0
correct_pairs = 0
pairs = combinations(cluster, 2)
for f1, f2 in pairs:
if labels_lookup[f1] == labels_lookup[f2]:
correct_pairs += 1
total_pairs += 1
return correct_pairs, total_pairs
def calculate_pairwise_pr(clusters, labels_lookup):
"""
Given a cluster, return pairwise precision and recall.
"""
correct_pairs = 0
total_pairs = 0
# Precision
for cluster in clusters:
cp, tp = count_correct_pairs(cluster, labels_lookup)
correct_pairs += cp
total_pairs += tp
# Recall:
gt_clusters = defaultdict(list)
# Count the actual number of possible true pairs:
for row_no, label in labels_lookup.items():
gt_clusters[label].append(row_no)
true_pairs = 0
for cluster_id, cluster_items in gt_clusters.items():
n = len(cluster_items)
true_pairs += n * (n-1)/2.0
print("Correct Pairs that are in the same cluster:{}".format(correct_pairs))
print("Total pairs as per the clusters created: {}".format(total_pairs))
print("Total possible true pairs:{}".format(true_pairs))
precision = float(correct_pairs)/total_pairs
recall = float(correct_pairs)/true_pairs
return precision, recall
if __name__ == '__main__':
parser = argparse.ArgumentError()
parser.add_argument('-c', '--clusters', help='List of lists where each \
list is a cluster')
parser.add_argument('-l', '--labels', help='List of labels associated \
with each vector.')
| mit | 7,007,695,952,947,865,000 | 33.454545 | 80 | 0.641689 | false | 4.075269 | false | false | false |
CheeseLord/warts | src/shared/geometry.py | 1 | 13529 | """
Functions for doing geometry calculations in the various types of coordinates
shared between client and server.
"""
import heapq
import math
from src.shared.config import CHUNK_SIZE, BUILD_SIZE
from src.shared.exceptions import NoPathToTargetError
from src.shared.logconfig import newLogger
log = newLogger(__name__)
BUILDS_PER_CHUNK = CHUNK_SIZE / BUILD_SIZE
# Costs used by pathfinding code.
# Measure distances in unit coordinates.
ORTHOGONAL_COST = CHUNK_SIZE
DIAGONAL_COST = int(ORTHOGONAL_COST * math.sqrt(2))
def findPath(gameState, srcPos, destPos):
"""
Compute and return a path from srcPos to destPos, avoiding any obstacles.
The returned path will be a list of waypoints such that a unit at srcPos
could travel by straight line to each of the waypoints in order and thus
get to destPos without hitting any obstacles.
"""
log.debug("Searching for path from %s to %s", srcPos, destPos)
chunkWidth, chunkHeight = gameState.sizeInChunks
# Value larger than any actual distance.
farFarAway = ORTHOGONAL_COST**2 * chunkWidth * chunkHeight
srcChunk = srcPos.chunk
destChunk = destPos.chunk
srcCX, srcCY = srcChunk
destCX, destCY = destChunk
# Make sure we're starting within the world.
if not (0 <= srcCX < chunkWidth and 0 <= srcCY < chunkHeight):
raise NoPathToTargetError("Starting point {} is outside the world."
.format(srcPos))
# If the source and dest points are in the same chunk, there's no point
# doing a chunk-based search to find a path, because the result will be
# trivial. Just go straight to the dest.
if srcChunk == destChunk:
return [destPos]
# This list actually serves 2 purposes. First, it keeps track of which
# chunks have been visited already. Second, for those that have been
# visited, it tracks which chunk came before it in the shortest path from
# the srcChunk to it.
parents = [[None for _y in range(chunkHeight)]
for _x in range(chunkWidth)]
# Set to True for a node once we know we've found a shortest path to it, so
# that we don't keep checking new paths to that node.
nodeFinalized = [[False for _y in range(chunkHeight)]
for _x in range(chunkWidth)]
# Shortest distance to each node from the start.
distanceFromStart = [[farFarAway for _y in range(chunkHeight)]
for _x in range(chunkWidth)]
distanceFromStart[srcCX][srcCY] = 0
# Priority queue of chunks that we still need to search outward from, where
# priority = distance from start + heuristic distance to end.
chunksToCheck = []
heapq.heappush(chunksToCheck, (_heuristicDistance(srcChunk, destChunk),
srcChunk))
while len(chunksToCheck) > 0:
_, currChunk = heapq.heappop(chunksToCheck)
log.debug("Pathfinding: search out from %s", currChunk)
if currChunk == destChunk:
break
cx, cy = currChunk
if nodeFinalized[cx][cy]:
# Already expanded from this node; don't do it again.
continue
nodeFinalized[cx][cy] = True
log.debug("Pathfinding: checking neighbors.")
for addlDist, neighbor in _getValidNeighbors(currChunk, gameState):
log.debug("Pathfinding: trying %s", neighbor)
nx, ny = neighbor
neighborStartDist = distanceFromStart[cx][cy] + addlDist
if neighborStartDist < distanceFromStart[nx][ny]:
log.debug("Pathfinding: found shorter path to neighbor.")
distanceFromStart[nx][ny] = neighborStartDist
parents[nx][ny] = currChunk
neighborFwdDist = _heuristicDistance(neighbor, destChunk)
neighborEstCost = neighborStartDist + neighborFwdDist
heapq.heappush(chunksToCheck, (neighborEstCost, neighbor))
if (not _chunkInBounds(gameState, destChunk)) or \
parents[destCX][destCY] is None:
raise NoPathToTargetError("No path exists from {} to {}."
.format(srcPos, destPos))
# Build the list of waypoints backward, by following the trail of parents
# all the way from dest to source.
lim = chunkWidth * chunkHeight
waypoints = []
currChunk = destChunk
while currChunk != srcChunk:
waypoints.append(currChunk)
cx, cy = currChunk
currChunk = parents[cx][cy]
assert currChunk is not None
# If there's a bug, crash rather than hanging (it's easier to debug).
lim -= 1
assert lim >= 0, "Infinite loop detected in findPath"
# Reverse the list of waypoints, since currently it's backward.
waypoints.reverse()
# Now convert the chunk coordinates to unit coordinates.
waypoints = [Coord.fromCBU(chunk=chunk).chunkCenter for chunk in waypoints]
# Note: The very first waypoint is still valid, because it's in a chunk
# orthogonally adjacent to the chunk containing the source point, so
# there's definitely not an obstacle in between.
# We still need to correct the last waypoint, which is currently the center
# of the dest chunk rather than the actual dest point. Note that we already
# handled the case where srcChunk == destChunk, so waypoints can't be
# empty.
waypoints[-1] = destPos
return waypoints
def _heuristicDistance(chunkA, chunkB):
"""
Return a heuristic estimate of the distance between chunk A and chunk B,
in *unit coordinates*.
"""
# Use Euclidean distance as the heuristic.
ax, ay = chunkA
bx, by = chunkB
deltaX = ORTHOGONAL_COST * (bx - ax)
deltaY = ORTHOGONAL_COST * (by - ay)
return int(math.hypot(deltaX, deltaY))
# Helper function for findPath.
def _getValidNeighbors(chunkPos, gameState):
x, y = chunkPos
# The 8 neighbors, separated into those orthogonally adjaent and those
# diagonally adjacent. Within each category, the particular neighbors are
# in random order.
diagonals = [
(x - 1, y + 1), # northwest
(x + 1, y - 1), # southeast
(x - 1, y - 1), # southwest
(x + 1, y + 1), # northeast
]
orthogonals = [
(x, y - 1), # south
(x, y + 1), # north
(x + 1, y ), # east
(x - 1, y ), # west
]
# Try diagonals first, so that when crossing a non-square rectangle we do
# the diagonal part of the path before the orthogonal part.
for neighbor in diagonals:
if _chunkInBounds(gameState, neighbor) and \
_chunkIsPassable(gameState, neighbor):
# Check that the other two corners of the square are passable, so
# we don't try to move through zero-width spaces in cases like:
# @@ B
# @@/
# /@@
# A @@
nx, ny = neighbor
if _chunkIsPassable(gameState, ( x, ny)) and \
_chunkIsPassable(gameState, (nx, y)):
yield (DIAGONAL_COST, neighbor)
for neighbor in orthogonals:
if _chunkInBounds(gameState, neighbor) and \
_chunkIsPassable(gameState, neighbor):
yield (ORTHOGONAL_COST, neighbor)
def _chunkInBounds(gameState, chunkPos):
return gameState.inBounds(Coord.fromCBU(chunk=chunkPos))
def _chunkIsPassable(gameState, chunkPos):
return gameState.isPassable(Coord.fromCBU(chunk=chunkPos))
class AbstractCoord(object):
def __init__(self, uPos):
super(AbstractCoord, self).__init__()
self.x, self.y = uPos
@classmethod
def fromUnit(cls, unit):
return cls(unit)
@classmethod
def fromCBU(cls, chunk=(0,0), build=(0,0), unit=(0,0)):
cx, cy = chunk
bx, by = build
ux, uy = unit
x = cx * CHUNK_SIZE + bx * BUILD_SIZE + ux
y = cy * CHUNK_SIZE + by * BUILD_SIZE + uy
return cls((x, y))
@property
def chunk(self):
return (self.x // CHUNK_SIZE, self.y // CHUNK_SIZE)
@property
def build(self):
return (self.x // BUILD_SIZE, self.y // BUILD_SIZE)
@property
def unit(self):
return (self.x, self.y)
@property
def buildSub(self):
return ((self.x % CHUNK_SIZE) // BUILD_SIZE,
(self.y % CHUNK_SIZE) // BUILD_SIZE)
@property
def unitSub(self):
return (self.x % BUILD_SIZE, self.y % BUILD_SIZE)
@property
def truncToChunk(self):
return self.fromCBU(chunk=self.chunk)
@property
def truncToBuild(self):
return self.fromCBU(build=self.build)
def serialize(self):
return [str(int(x)) for x in self.unit]
@classmethod
def deserialize(cls, descs):
assert len(descs) == 2
return cls.fromUnit(map(int, descs))
def __repr__(self):
return "{}({}, {})".format(type(self).__name__, self.x, self.y)
def __str__(self):
cx, cy = self.chunk
bx, by = self.build
ux, uy = self.unit
return "({cx}.{bx}.{ux}, {cy}.{by}.{uy})".format(
cx=cx, cy=cy, bx=bx, by=by, ux=ux, uy=uy
)
def __eq__(self, rhs):
if not isinstance(self, type(rhs)) and not isinstance(rhs, type(self)):
raise TypeError("Cannot compare {} with {}.".format(
type(self), type(rhs)
))
return self.x == rhs.x and self.y == rhs.y
def __ne__(self, rhs):
if not isinstance(self, type(rhs)) and not isinstance(rhs, type(self)):
raise TypeError("Cannot compare {} with {}.".format(
type(self), type(rhs)
))
return self.x != rhs.x or self.y != rhs.y
# Coord + Coord = err
# Coord + Dist = Coord
# Dist + Coord = Coord
# Dist + Dist = Dist
#
# Coord - Coord = Dist
# Coord - Dist = Coord
# Dist - Coord = err
# Dist - Dist = Dist
#
# - Coord = err
# - Dist = Dist
def __add__(self, rhs):
if isinstance(self, Coord) and isinstance(rhs, Coord):
raise TypeError("Cannot add two Coords.")
elif isinstance(self, Distance) and isinstance(rhs, Distance):
retType = Distance
else:
# Coord + Distance or Distance + Coord
retType = Coord
x = self.x + rhs.x
y = self.y + rhs.y
return retType((x, y))
def __sub__(self, rhs):
if isinstance(self, Coord) == isinstance(rhs, Coord):
# Coord - Coord or Distance - Distance
retType = Distance
elif isinstance(self, Coord):
# Coord - Distance
retType = Coord
else:
# Distance - Coord
raise TypeError("Cannot subtract Distance - Coord.")
x = self.x - rhs.x
y = self.y - rhs.y
return retType((x, y))
class Distance(AbstractCoord):
def length(self):
"Return the Euclidean length of this Distance."
return math.hypot(self.x, self.y)
def __neg__(self):
x = - self.x
y = - self.y
return Distance((x, y))
def __rmul__(self, lhs):
if not isinstance(lhs, int) and not isinstance(lhs, float):
raise TypeError("Cannot multiply Distance by {}.".format(
type(lhs)
))
return Distance((int(round(self.x * lhs)), int(round(self.y * lhs))))
def __mul__(self, rhs):
return rhs * self
class Coord(AbstractCoord):
@property
def chunkCenter(self):
"""
Return the Coord of the center of the chunk containing this Coord.
"""
return self.fromCBU(chunk=self.chunk,
unit=(CHUNK_SIZE // 2, CHUNK_SIZE // 2))
class Rect(object):
def __init__(self, coord, dist):
if not isinstance(coord, Coord):
raise TypeError("Rectangle must have Coordinate. Found {}".format(
type(coord)
))
if not isinstance(dist, Distance):
raise TypeError("Rectangle must have Distance. Found {}".format(
type(dist)
))
self.coord = coord
self.dist = dist
@property
def center(self):
return self.coord + 0.5*self.dist
def serialize(self):
return self.coord.serialize() + self.dist.serialize()
@classmethod
def deserialize(cls, descs):
assert len(descs) == 4
cdesc = descs[:2]
ddesc = descs[2:]
return cls(Coord.deserialize(cdesc), Distance.deserialize(ddesc))
def isRectCollision(rect1, rect2):
if not isinstance(rect1, Rect):
raise TypeError("Can only compare distance between rectangles."
"Found {}".format(
type(rect1)
))
if not isinstance(rect2, Rect):
raise TypeError("Can only compare distance between rectangles."
"Found {}".format(
type(rect2)
))
# Rectangle 1
left1, bottom1 = rect1.coord.unit
right1, top1 = (rect1.coord + rect1.dist).unit
right1, top1 = right1 - 1, top1 -1
# Rectangle 2
left2, bottom2 = rect2.coord.unit
right2, top2 = (rect2.coord + rect2.dist).unit
right2, top2 = right2 - 1, top2 -1
return ((bottom2 < top1 and top2 > bottom1) and
(left2 < right1 and right2 > left1))
| mit | 184,582,993,762,131,330 | 32.992462 | 79 | 0.592357 | false | 3.809913 | false | false | false |
jesseward/harmonic-shuffle | harmonic_shuffle/harmony.py | 1 | 3320 | import logging
logger = logging.getLogger(__name__)
class Harmony(object):
# numeric representation of the Circle of 5ths.
HARMONY = {
'G': 1,
'D': 2,
'A': 3,
'E': 4,
'B': 5,
'F#': 6,
'Gb': 6,
'Db': 7,
'C#': 7,
'Ab': 8,
'Eb': 9,
'Bb': 10,
'F': 11,
'C': 12,
'Em': 101,
'Bm': 102,
'F#m': 103,
'Gbm': 103,
'Dbm': 104,
'C#m': 104,
'G#m': 105,
'Ebm': 106,
'D#m': 106,
'A#m': 107,
'Bbm': 107,
'Fm': 108,
'Cm': 109,
'Gm': 110,
'Dm': 111,
'Am': 112,
}
def __init__(self, root_key):
"""
:param root_key: A string value representing the root key signature for the song.
"""
if root_key not in Harmony.HARMONY.keys():
raise LookupError('{key} is not reconized'.format(key=root_key))
self.root_key = root_key
self.root_key_value = Harmony.HARMONY[self.root_key]
# a list representing all compatible tone for a given root_key
self.harmonies = self._get_value(self.root_key_value) + self.down_shift() + self.up_shift() + self.minor()
def __repr__(self):
return '<Harmony key={0.root_key} value={0.root_key_value}>'.format(self)
@staticmethod
def _get_value(value):
""" performs a look-up of the HARMONY dictionary by value.
:parameter value: An integer representing a harmonic key
:return: A list of keys
:rtype list:
"""
return [note for note, fifth_value in Harmony.HARMONY.iteritems() if value == fifth_value]
def down_shift(self):
""" Fetches the next key(s) that represents a single tone downward
:return: A list representing a compatible key
:rtype list:
"""
# handle a roll over at position "1" on the wheel. in the case of 1 or 101 we down
# shift to 12 or 112
if self.root_key_value == 1:
down = Harmony._get_value(12)
elif self.root_key_value == 101:
down = Harmony._get_value(112)
else:
down = Harmony._get_value(self.root_key_value - 1)
return down
def up_shift(self):
""" Fetches the next key(s) that represents a single tone forward .
:return: A list representing a group of compatible keys
:rtype list:
"""
# handle a rollover at the apex of the wheel . when key_value is 12 or 112
# we shift forward to 1 (major) or 101 (minor)
if self.root_key_value == 12:
up = Harmony._get_value(1)
elif self.root_key_value == 112:
up = Harmony._get_value(101)
else:
up = Harmony._get_value(self.root_key_value + 1)
return up
def minor(self):
""" Fetches an adjacent key on the wheel (maj -> min or min -> maj).
:return: A list representing a group of compatible keys
:rtype list:
"""
# shift from major to minor
if self.root_key_value < 100:
return self._get_value(self.root_key_value + 100)
# otherwise shift minor to major.
else:
return self._get_value(self.root_key_value - 100)
| mit | -5,146,620,431,627,529,000 | 28.642857 | 114 | 0.528614 | false | 3.513228 | false | false | false |
kaffeebrauer/Lean | Algorithm.Python/CoarseFundamentalTop3Algorithm.py | 5 | 3479 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System.Core")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Algorithm")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import QCAlgorithm
from QuantConnect.Data.UniverseSelection import *
### <summary>
### Demonstration of using coarse and fine universe selection together to filter down a smaller universe of stocks.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="universes" />
### <meta name="tag" content="coarse universes" />
### <meta name="tag" content="fine universes" />
class CoarseFundamentalTop3Algorithm(QCAlgorithm):
def Initialize(self):
'''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''
self.SetStartDate(2014,3,24) #Set Start Date
self.SetEndDate(2014,4,7) #Set End Date
self.SetCash(50000) #Set Strategy Cash
# what resolution should the data *added* to the universe be?
self.UniverseSettings.Resolution = Resolution.Daily
# this add universe method accepts a single parameter that is a function that
# accepts an IEnumerable<CoarseFundamental> and returns IEnumerable<Symbol>
self.AddUniverse(self.CoarseSelectionFunction)
self.__numberOfSymbols = 3
self._changes = None
# sort the data by daily dollar volume and take the top 'NumberOfSymbols'
def CoarseSelectionFunction(self, coarse):
# sort descending by daily dollar volume
sortedByDollarVolume = sorted(coarse, key=lambda x: x.DollarVolume, reverse=True)
# return the symbol objects of the top entries from our sorted collection
return [ x.Symbol for x in sortedByDollarVolume[:self.__numberOfSymbols] ]
def OnData(self, data):
self.Log(f"OnData({self.UtcTime}): Keys: {', '.join([key.Value for key in data.Keys])}")
# if we have no changes, do nothing
if self._changes is None: return
# liquidate removed securities
for security in self._changes.RemovedSecurities:
if security.Invested:
self.Liquidate(security.Symbol)
# we want 1/N allocation in each security in our universe
for security in self._changes.AddedSecurities:
self.SetHoldings(security.Symbol, 1 / self.__numberOfSymbols)
self._changes = None
# this event fires whenever we have changes to our universe
def OnSecuritiesChanged(self, changes):
self._changes = changes
self.Log(f"OnSecuritiesChanged({self.UtcTime}):: {changes}")
def OnOrderEvent(self, fill):
self.Log(f"OnOrderEvent({self.UtcTime}):: {fill}") | apache-2.0 | -6,356,268,221,975,157,000 | 39.917647 | 151 | 0.708657 | false | 3.919955 | false | false | false |
redus/doodles | django-ex/survivalguide/survivalguide/settings.py | 1 | 2136 | """
Django settings for survivalguide project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'hxzazv7qqn-nv8c=2(9$ch-3og5tms5-hr4s1zkhyxtrs9p8to'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'south',
'talks',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'survivalguide.urls'
WSGI_APPLICATION = 'survivalguide.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
CRISPY_TEMPLATE_PACK = 'bootstrap3'
| unlicense | -1,785,783,226,436,275,500 | 22.472527 | 71 | 0.720506 | false | 3.178571 | false | false | false |
ubuntunux/GuineaPig | PyEngine3D/Utilities/TransformObject.py | 1 | 6511 | import numpy as np
from .Transform import *
class TransformObject:
def __init__(self, local=None):
self.quat = Float4(0.0, 0.0, 0.0, 1.0)
self.local = local if local is not None else Matrix4()
self.updated = True
self.left = WORLD_LEFT.copy()
self.up = WORLD_UP.copy()
self.front = WORLD_FRONT.copy()
self.pos = Float3()
self.rot = Float3()
self.scale = Float3(1, 1, 1)
self.prev_Pos = Float3()
self.prev_Rot = Float3()
self.prev_Scale = Float3(1, 1, 1)
self.rotationMatrix = Matrix4()
self.matrix = Matrix4()
self.inverse_matrix = Matrix4()
self.prev_matrix = Matrix4()
self.prev_inverse_matrix = Matrix4()
self.update_transform(True)
def reset_transform(self):
self.updated = True
self.set_pos(Float3())
self.set_rotation(Float3())
self.set_scale(Float3(1, 1, 1))
self.update_transform(True)
# Translate
def get_pos(self):
return self.pos
def get_pos_x(self):
return self.pos[0]
def get_pos_y(self):
return self.pos[1]
def get_pos_z(self):
return self.pos[2]
def set_pos(self, pos):
self.pos[...] = pos
def set_pos_x(self, x):
self.pos[0] = x
def set_pos_y(self, y):
self.pos[1] = y
def set_pos_z(self, z):
self.pos[2] = z
def move(self, pos):
self.pos[...] = self.pos + pos
def move_front(self, pos):
self.pos[...] = self.pos + self.front * pos
def move_left(self, pos):
self.pos[...] = self.pos + self.left * pos
def move_up(self, pos):
self.pos[...] = self.pos + self.up * pos
def move_x(self, pos_x):
self.pos[0] += pos_x
def move_y(self, pos_y):
self.pos[1] += pos_y
def move_z(self, pos_z):
self.pos[2] += pos_z
# Rotation
def get_rotation(self):
return self.rot
def get_pitch(self):
return self.rot[0]
def get_yaw(self):
return self.rot[1]
def get_roll(self):
return self.rot[2]
def set_rotation(self, rot):
self.rot[...] = rot
def set_pitch(self, pitch):
if pitch > TWO_PI or pitch < 0.0:
pitch %= TWO_PI
self.rot[0] = pitch
def set_yaw(self, yaw):
if yaw > TWO_PI or yaw < 0.0:
yaw %= TWO_PI
self.rot[1] = yaw
def set_roll(self, roll):
if roll > TWO_PI or roll < 0.0:
roll %= TWO_PI
self.rot[2] = roll
def rotation(self, rot):
self.rotation_pitch(rot[0])
self.rotation_yaw(rot[1])
self.rotation_roll(rot[2])
def rotation_pitch(self, delta=0.0):
self.rot[0] += delta
if self.rot[0] > TWO_PI or self.rot[0] < 0.0:
self.rot[0] %= TWO_PI
def rotation_yaw(self, delta=0.0):
self.rot[1] += delta
if self.rot[1] > TWO_PI or self.rot[1] < 0.0:
self.rot[1] %= TWO_PI
def rotation_roll(self, delta=0.0):
self.rot[2] += delta
if self.rot[2] > TWO_PI or self.rot[2] < 0.0:
self.rot[2] %= TWO_PI
# Scale
def get_scale(self):
return self.scale
def get_scale_x(self):
return self.scale[0]
def get_scale_Y(self):
return self.scale[1]
def get_scale_z(self):
return self.scale[2]
def set_scale(self, scale):
self.scale[...] = scale
def set_scale_x(self, x):
self.scale[0] = x
def set_scale_y(self, y):
self.scale[1] = y
def set_scale_z(self, z):
self.scale[2] = z
def scaling(self, scale):
self.scale[...] = self.scale + scale
# update Transform
def update_transform(self, update_inverse_matrix=False, force_update=False):
prev_updated = self.updated
self.updated = False
if any(self.prev_Pos != self.pos) or force_update:
self.prev_Pos[...] = self.pos
self.updated = True
if any(self.prev_Rot != self.rot) or force_update:
self.prev_Rot[...] = self.rot
self.updated = True
# Matrix Rotation - faster
matrix_rotation(self.rotationMatrix, *self.rot)
matrix_to_vectors(self.rotationMatrix, self.left, self.up, self.front)
# Euler Rotation - slow
# p = get_rotation_matrix_x(self.rot[0])
# y = get_rotation_matrix_y(self.rot[1])
# r = get_rotation_matrix_z(self.rot[2])
# self.rotationMatrix = np.dot(p, np.dot(y, r))
# matrix_to_vectors(self.rotationMatrix, self.right, self.up, self.front)
# Quaternion Rotation - slower
# euler_to_quaternion(*self.rot, self.quat)
# quaternion_to_matrix(self.quat, self.rotationMatrix)
# matrix_to_vectors(self.rotationMatrix, self.right, self.up, self.front)
if any(self.prev_Scale != self.scale) or force_update:
self.prev_Scale[...] = self.scale
self.updated = True
if prev_updated or self.updated:
self.prev_matrix[...] = self.matrix
if update_inverse_matrix:
self.prev_inverse_matrix[...] = self.inverse_matrix
if self.updated:
self.matrix[...] = self.local
transform_matrix(self.matrix, self.pos, self.rotationMatrix, self.scale)
if update_inverse_matrix:
# self.inverse_matrix[...] = np.linalg.inv(self.matrix)
self.inverse_matrix[...] = self.local
inverse_transform_matrix(self.inverse_matrix, self.pos, self.rotationMatrix, self.scale)
return self.updated
def get_transform_infos(self):
text = "\tPosition : " + " ".join(["%2.2f" % i for i in self.pos])
text += "\n\tRotation : " + " ".join(["%2.2f" % i for i in self.rot])
text += "\n\tFront : " + " ".join(["%2.2f" % i for i in self.front])
text += "\n\tLeft : " + " ".join(["%2.2f" % i for i in self.left])
text += "\n\tUp : " + " ".join(["%2.2f" % i for i in self.up])
text += "\n\tMatrix"
text += "\n\t" + " ".join(["%2.2f" % i for i in self.matrix[0, :]])
text += "\n\t" + " ".join(["%2.2f" % i for i in self.matrix[1, :]])
text += "\n\t" + " ".join(["%2.2f" % i for i in self.matrix[2, :]])
text += "\n\t" + " ".join(["%2.2f" % i for i in self.matrix[3, :]])
return text
| bsd-2-clause | -1,846,344,314,063,186,700 | 27.682819 | 104 | 0.530948 | false | 3.20897 | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.