# repo: dschetel/counter | path: counter.py | copies: 1 | size: 1557 | license: gpl-2.0
#!/usr/local/bin/python
# TimeTracking Program for Projects
from datetime import datetime
import time
from decimal import *
import sys
import os
import select
from termcolor import colored
os.system('cls' if os.name == 'nt' else 'clear')
# Set path for datafile
PATH='./tTrackData.txt'
getcontext().prec = 2
def openDatafile():
if os.path.isfile(PATH):
print "Datafile exists"
file = open("tTrackData.txt", 'r')
mins_old = file.readlines()
return mins_old
else:
file = open(PATH, 'w')
file.write("0")
print "New datafile created"
mins_old = [0]
return mins_old
file.close
def timer(mins_old):
print "You already worked for %s hours\n" % (Decimal(mins_old[0])/60)
run = raw_input("Press Enter to start / 'no' to abort > ")
now = datetime.now()
mins_old = mins_old[0]
secs = 0
if run == "no":
print "Program terminated"
sys.exit()
else:
while True:
os.system('cls' if os.name == 'nt' else 'clear')
print "Process started at %s:%s" %(now.hour, now.minute)
print colored("%s minute(s)" %(int(secs)/60),'green')
print '\n Press Enter to stop'
time.sleep(1)
secs += 1
if sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
line = raw_input()
return secs
break
def printFile(mins_old, secs):
print "Final time: %s hours" %(Decimal((secs/60) + int(mins_old[0]))/60)
file = open(PATH, "w")
file.write(repr(int(mins_old[0]) + secs/60 + 1))
file.close()
print "TimeTracking Program \n -------------------"
mins_old = openDatafile()
secs = timer(mins_old)
printFile(mins_old, secs)
# repo: matthijsvk/multimodalSR | path: code/combinedSR/combinedNN_tools.py | copies: 1 | size: 117751
from __future__ import print_function
import logging # debug < info < warn < error < critical # from https://docs.python.org/3/howto/logging-cookbook.html
import traceback
import theano
import theano.tensor as T
from tqdm import tqdm
logger_combinedtools = logging.getLogger('combined.tools')
logger_combinedtools.setLevel(logging.DEBUG)
from general_tools import *
import os
import time
import lasagne
import lasagne.layers as L
import lasagne.objectives as LO
import numpy as np
import preprocessingCombined
class AttentionLayer(lasagne.layers.Layer):
'''
A layer which computes a weighted average across the second dimension of
its input, where the weights are computed according to the third dimension.
This results in the second dimension being flattened. This is an attention
mechanism - which "steps" (in the second dimension) are attended to is
determined by a learned transform of the features.
Parameters
----------
incoming : a :class:`Layer` instance or a tuple
The layer feeding into this layer, or the expected input shape
W : Theano shared variable, numpy array or callable
An initializer for the weights of the layer. If a shared variable or a
numpy array is provided the shape should be (num_inputs,).
b : Theano shared variable, numpy array, callable or None
An initializer for the biases of the layer. If a shared variable or a
numpy array is provided the shape should be () (it is a scalar).
If None is provided the layer will have no biases.
nonlinearity : callable or None
The nonlinearity that is applied to the layer activations. If None
is provided, the layer will be linear.
'''
def __init__(self, incoming, W=lasagne.init.Normal(),
b=lasagne.init.Constant(0.),
nonlinearity=lasagne.nonlinearities.tanh,
**kwargs):
super(AttentionLayer, self).__init__(incoming, **kwargs)
# Use identity nonlinearity if provided nonlinearity is None
self.nonlinearity = (lasagne.nonlinearities.identity
if nonlinearity is None else nonlinearity)
# Add weight vector parameter
self.W = self.add_param(W, (self.input_shape[2],), name="W")
if b is None:
self.b = None
else:
# Add bias scalar parameter
self.b = self.add_param(b, (), name="b", regularizable=False)
def get_output_shape_for(self, input_shape):
return (input_shape[0], input_shape[-1])
def get_output_for(self, input, **kwargs):
# Dot with W to get raw weights, shape=(n_batch, n_steps)
activation = T.dot(input, self.W)
# Add bias
if self.b is not None:
activation = activation + self.b
# Apply nonlinearity
activation = self.nonlinearity(activation)
# Perform softmax
activation = T.exp(activation)
activation /= activation.sum(axis=1).dimshuffle(0, 'x')
# Weight steps
weighted_input = input * activation.dimshuffle(0, 1, 'x')
# Compute weighted average (summing because softmax is normed)
return weighted_input.sum(axis=1)
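# Minimal usage sketch of AttentionLayer (an illustrative helper added here, not part of the
# original pipeline; it relies only on the lasagne/theano/numpy imports above). It shows how
# the layer collapses a (n_batch, n_steps, n_features) input to (n_batch, n_features) via a
# learned softmax weighting over the steps dimension.
def _attention_layer_demo(n_batch=2, n_steps=5, n_features=8):
    l_in = L.InputLayer((None, n_steps, n_features))
    l_att = AttentionLayer(l_in)
    x = T.tensor3('x')
    out_fn = theano.function([x], L.get_output(l_att, x))
    dummy = np.random.randn(n_batch, n_steps, n_features).astype(theano.config.floatX)
    return out_fn(dummy)  # shape: (n_batch, n_features)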
class NeuralNetwork:
network = None
training_fn = None
best_param = None
best_error = 100
curr_epoch, best_epoch = 0, 0
X = None
Y = None
network_train_info = [[], [], []]
def __init__(self, architecture, data=None, loadPerSpeaker = True, dataset="TCDTIMIT", test_dataset="TCDTIMIT",
batch_size=1, num_features=39, num_output_units=39,
lstm_hidden_list=(100,), bidirectional=True, audio_features='conv',
cnn_network="google", cnn_features='dense', lipRNN_hidden_list=None, lipRNN_bidirectional=True, lipRNN_features="rawRNNfeatures",
dense_hidden_list=(512,), combinationType='FC',save_name=None,
seed=int(time.time()), model_paths={}, debug=False, verbose=False, logger=logger_combinedtools):
self.dataset= dataset
self.test_dataset = test_dataset
self.loadPerSpeaker = loadPerSpeaker
self.model_paths = model_paths
self.num_output_units = num_output_units
self.num_features = num_features
self.batch_size = batch_size
self.epochsNotImproved = 0 # keep track, to know when to stop training
self.combinationType = combinationType
# for storage of training info
self.network_train_info = {
'train_cost': [],
'val_cost': [], 'val_acc': [], 'val_topk_acc': [],
'test_cost': [], 'test_acc': [], 'test_topk_acc': [],
'nb_params': {}
} # used to be list of lists
if architecture == "combined":
if data != None:
images_train, mfccs_train, audioLabels_train, validLabels_train, validAudioFrames_train = data
# import pdb;pdb.set_trace()
self.images = images_train[0] # images are stored per video file. batch_size is for audio
self.mfccs = mfccs_train[:batch_size]
self.audioLabels = audioLabels_train[:batch_size]
self.validLabels = validLabels_train[:batch_size]
self.validAudioFrames = validAudioFrames_train[:batch_size]
# import pdb;pdb.set_trace()
self.masks = generate_masks(inputs=self.mfccs, valid_frames=self.validAudioFrames,
batch_size=len(self.mfccs),
logger=logger_combinedtools)
self.mfccs = pad_sequences_X(self.mfccs) # shouldn't change shape because batch_size == 1
self.audioLabels = pad_sequences_y(self.audioLabels) # these aren't actually used
self.validLabels = pad_sequences_y(self.validLabels)
self.validAudioFrames = pad_sequences_y(self.validAudioFrames)
if verbose:
logger.debug('images.shape: %s', len(self.images))
logger.debug('images[0].shape: %s', self.images[0].shape)
logger.debug('images[0][0][0].type: %s', type(self.images[0][0][0]))
logger.debug('y.shape: %s', self.audioLabels.shape)
logger.debug('y[0].shape: %s', self.audioLabels[0].shape)
logger.debug('y[0][0].type: %s', type(self.audioLabels[0][0]))
logger.debug('masks.shape: %s', self.masks.shape)
logger.debug('masks[0].shape: %s', self.masks[0].shape)
logger.debug('masks[0][0].type: %s', type(self.masks[0][0]))
logger.info("NUM FEATURES: %s", num_features)
# create Theano variables and generate the networks
self.LR_var = T.scalar('LR', dtype=theano.config.floatX)
            self.targets_var = T.imatrix('targets')  # 2D for the RNN (many frames, and thus many targets, per example)
self.CNN_targets_var = T.ivector('targets') # 1D for the CNN (1 target per example)
## AUDIO PART ##
self.audio_inputs_var = T.tensor3('audio_inputs')
self.audio_masks_var = T.matrix('audio_masks')
self.audio_valid_frames_var = T.imatrix('valid_indices')
self.audioNet_dict, self.audioNet_lout, self.audioNet_lout_flattened, self.audioNet_lout_features = \
self.build_audioRNN(n_hidden_list=lstm_hidden_list, bidirectional=bidirectional,
seed=seed, debug=debug, logger=logger)
if audio_features == 'dense': # audioNet_lout_flattened output shape: (nbValidFrames, 39 phonemes)
self.audioNet_lout_features = self.audioNet_lout_flattened
# else: audioNet_lout_flattened output shape: (nbValidFrames, nbLSTMunits)
## LIPREADING PART ##
self.CNN_input_var = T.tensor4('cnn_input')
# batch size is number of valid frames in each video
if "google" in cnn_network:
if "binary" in cnn_network:
self.CNN_dict, self.CNN_lout, self.CNN_lout_features = self.build_google_binary_CNN()
else:
self.CNN_dict, self.CNN_lout, self.CNN_lout_features = self.build_google_CNN()
elif "resnet50" in cnn_network:
self.CNN_dict, self.CNN_lout, self.CNN_lout_features = self.build_resnet50_CNN()
elif "cifar10_v2" in cnn_network:
self.CNN_dict, self.CNN_lout, self.CNN_lout_features = self.build_cifar10_CNN_v2()
elif "cifar10" in cnn_network:
self.CNN_dict, self.CNN_lout, self.CNN_lout_features = self.build_cifar10_CNN_v2()
# CNN_lout_features output shape = (nbValidFrames, 512x7x7)
self.cnn_features = cnn_features
# for CNN-LSTM combination networks
self.lipreadingType = 'CNN'
if lipRNN_hidden_list != None: #add LSTM layers on top of the CNN
self.lipreadingType = 'CNN_LSTM'
# input to LSTM network: conv features, or with dense softmax layer in between?
                # direct conv outputs are 512x7x7 = 25,088 features -> huge networks. Might need to reduce size
if cnn_features == 'dense':
self.lipreadingRNN_dict, self.lipreading_lout_features = self.build_lipreadingRNN(self.CNN_lout,
lipRNN_hidden_list,
bidirectional=lipRNN_bidirectional)
else:
self.lipreadingRNN_dict, self.lipreading_lout_features = self.build_lipreadingRNN(self.CNN_lout_features,
lipRNN_hidden_list,
bidirectional=lipRNN_bidirectional)
# For lipreading only: input to softmax FC layer now not from conv layer, but from LSTM features that are put on top of the CNNs
self.lipreading_lout = self.build_softmax(inputLayer = self.lipreading_lout_features, nbClasses=self.num_output_units)
if lipRNN_features == 'dense':
self.lipreading_lout_features = self.lipreading_lout
else: #only use the CNN
if cnn_features == 'dense':
self.lipreading_lout_features = self.CNN_lout
else:
self.lipreading_lout_features = self.CNN_lout_features
self.lipreading_lout = self.CNN_lout
# # You can use this to get the shape of the raw features (before FC layers), which needs to be hard-coded in the build_<networkName>() function
# logger_combinedtools.debug("lip features shape: %s", self.lipreading_lout_features.output_shape)
# import pdb;pdb.set_trace()
## COMBINED PART ##
# batch size is number of valid frames in each video
self.combined_dict, self.combined_lout = self.build_combined(lipreading_lout=self.lipreading_lout_features,
audio_lout=self.audioNet_lout_features,
dense_hidden_list=dense_hidden_list,
combinationType=combinationType)
logger_combinedtools.debug("output shape: %s", self.combined_lout.output_shape)
#import pdb;pdb.set_trace()
self.loadPreviousResults(save_name)
nb_params = self.getParamsInfo()
self.network_train_info['nb_params'] = nb_params
store_path = save_name + '_trainInfo.pkl'
saveToPkl(store_path, self.network_train_info)
logger_combinedtools.info(" # params lipreading seperate: %s", "{:,}".format(nb_params['nb_lipreading']))
logger_combinedtools.info(" # params audio seperate: %s", "{:,}".format(nb_params['nb_audio']))
logger_combinedtools.info(" # params combining: ")
logger_combinedtools.info(" # params total: %s", "{:,}".format(nb_params['nb_total']))
logger_combinedtools.info(" # params lip features: %s", "{:,}".format(nb_params['nb_lipreading_features']))
logger_combinedtools.info(" # params CNN features: %s", "{:,}".format(nb_params['nb_CNN_used']))
logger_combinedtools.info(" # params lip LSTM: %s", "{:,}".format(nb_params['nb_lipRNN']))
logger_combinedtools.info(" # params audio features: %s", "{:,}".format(nb_params['nb_audio_features']))
logger_combinedtools.info(" # params combining FC: %s", "{:,}".format(nb_params['nb_combining']))
# allLayers= L.get_all_layers(self.lipreading_lout)
# for layer in allLayers:
# logger_combinedtools.debug("layer : %s \t %s", layer, layer.output_shape)
# [layer.output_shape for layer in allLayers[-5:-1]]
# import pdb;pdb.set_trace()
else:
print("ERROR: Invalid argument: The valid architecture arguments are: 'combined'")
def build_audioRNN(self, n_hidden_list=(100,), bidirectional=False,
seed=int(time.time()), debug=False, logger=logger_combinedtools):
# some inspiration from http://colinraffel.com/talks/hammer2015recurrent.pdf
if debug:
logger.debug('\nInputs:');
logger.debug(' X.shape: %s', self.mfccs[0].shape)
logger.debug(' X[0].shape: %s %s %s \n%s', self.mfccs[0][0].shape, type(self.mfccs[0][0]),
type(self.mfccs[0][0][0]), self.mfccs[0][0][:5])
logger.debug('Targets: ');
logger.debug(' Y.shape: %s', self.validLabels.shape)
logger.debug(' Y[0].shape: %s %s %s \n%s', self.validLabels[0].shape, type(self.validLabels[0]),
type(self.validLabels[0][0]),
self.validLabels[0][:5])
logger.debug('Layers: ')
        # fix these at initialization because it allows for compiler optimizations
num_output_units = self.num_output_units
num_features = self.num_features
batch_size = self.batch_size
audio_inputs = self.audio_inputs_var
audio_masks = self.audio_masks_var # set MATRIX, not iMatrix!! Otherwise all mask calculations are done by CPU, and everything will be ~2x slowed down!! Also in general_tools.generate_masks()
valid_frames = self.audio_valid_frames_var
net = {}
# shape = (batch_size, batch_max_seq_length, num_features)
net['l1_in'] = L.InputLayer(shape=(batch_size, None, num_features), input_var=audio_inputs)
net['l1_mask'] = L.InputLayer(shape=(batch_size, None), input_var=audio_masks)
if debug:
get_l_in = L.get_output(net['l1_in'])
l_in_val = get_l_in.eval({net['l1_in'].input_var: self.mfccs})
# logger.debug(l_in_val)
logger.debug(' l_in size: %s', l_in_val.shape);
get_l_mask = L.get_output(net['l1_mask'])
l_mask_val = get_l_mask.eval({net['l1_mask'].input_var: self.masks})
# logger.debug(l_in_val)
logger.debug(' l_mask size: %s', l_mask_val.shape);
n_batch, n_time_steps, n_features = net['l1_in'].input_var.shape
logger.debug(" n_batch: %s | n_time_steps: %s | n_features: %s", n_batch, n_time_steps,
n_features)
## LSTM parameters
gate_parameters = L.recurrent.Gate(
W_in=lasagne.init.Orthogonal(), W_hid=lasagne.init.Orthogonal(),
b=lasagne.init.Constant(0.))
cell_parameters = L.recurrent.Gate(
W_in=lasagne.init.Orthogonal(), W_hid=lasagne.init.Orthogonal(),
# Setting W_cell to None denotes that no cell connection will be used.
W_cell=None, b=lasagne.init.Constant(0.),
# By convention, the cell nonlinearity is tanh in an LSTM.
nonlinearity=lasagne.nonlinearities.tanh)
# generate layers of stacked LSTMs, possibly bidirectional
net['l2_lstm'] = []
for i in range(len(n_hidden_list)):
n_hidden = n_hidden_list[i]
print("One layer, ", n_hidden)
if i == 0:
input = net['l1_in']
else:
input = net['l2_lstm'][i-1] #TODO should be -1
nextForwardLSTMLayer = L.recurrent.LSTMLayer(
input, n_hidden,
# We need to specify a separate input for masks
mask_input=net['l1_mask'],
# Here, we supply the gate parameters for each gate
ingate=gate_parameters, forgetgate=gate_parameters,
cell=cell_parameters, outgate=gate_parameters,
# We'll learn the initialization and use gradient clipping
learn_init=True, grad_clipping=100.)
net['l2_lstm'].append(nextForwardLSTMLayer)
if bidirectional:
input = net['l2_lstm'][-1]
# Use backward LSTM
# The "backwards" layer is the same as the first,
# except that the backwards argument is set to True.
nextBackwardLSTMLayer = L.recurrent.LSTMLayer(
input, n_hidden, ingate=gate_parameters,
mask_input=net['l1_mask'], forgetgate=gate_parameters,
cell=cell_parameters, outgate=gate_parameters,
learn_init=True, grad_clipping=100., backwards=True)
net['l2_lstm'].append(nextBackwardLSTMLayer)
# We'll combine the forward and backward layer output by summing.
# Merge layers take in lists of layers to merge as input.
# The output of l_sum will be of shape (n_batch, max_n_time_steps, n_features)
net['l2_lstm'].append(L.ElemwiseSumLayer([net['l2_lstm'][-2], net['l2_lstm'][-1]]))
# we need to convert (batch_size, seq_length, num_features) to (batch_size * seq_length, num_features) because Dense networks can't deal with 2 unknown sizes
net['l3_reshape'] = L.ReshapeLayer(net['l2_lstm'][-1], (-1, n_hidden_list[-1]))
# Get the output features for passing to the combination network
net['l4_features'] = L.SliceLayer(net['l3_reshape'], indices=valid_frames, axis=0)
net['l4_features'] = L.ReshapeLayer(net['l4_features'], (-1, n_hidden_list[-1]))
# this will output shape(nbValidFrames, nbLSTMunits)
# add some extra layers to get an output for the audio network only
# Now we can apply feed-forward layers as usual for classification
net['l6_dense'] = L.DenseLayer(net['l3_reshape'], num_units=num_output_units,
nonlinearity=lasagne.nonlinearities.softmax)
# # Now, the shape will be (n_batch * n_timesteps, num_output_units). We can then reshape to
# # n_batch to get num_output_units values for each timestep from each sequence
# only use the valid indices
net['l7_out'] = L.ReshapeLayer(net['l6_dense'], (batch_size, -1, num_output_units))
net['l7_out_valid_basic'] = L.SliceLayer(net['l7_out'], indices=valid_frames, axis=1)
net['l7_out_valid_flattened'] = L.ReshapeLayer(net['l7_out_valid_basic'], (-1, num_output_units))
net['l7_out_valid'] = L.ReshapeLayer(net['l7_out_valid_basic'], (batch_size, -1, num_output_units))
if debug:
get_l_out = theano.function([net['l1_in'].input_var, net['l1_mask'].input_var], L.get_output(net['l7_out']))
l_out = get_l_out(self.mfccs, self.masks)
# this only works for batch_size == 1
get_l_out_valid = theano.function([audio_inputs, audio_masks, valid_frames],
L.get_output(net['l7_out_valid']))
try:
l_out_valid = get_l_out_valid(self.mfccs, self.masks, self.validAudioFrames)
logger.debug('\n\n\n l_out: %s | l_out_valid: %s', l_out.shape, l_out_valid.shape);
except:
logger.warning("batchsize not 1, get_valid not working")
if debug: self.print_RNN_network_structure(net)
if debug:import pdb;pdb.set_trace()
return net, net['l7_out_valid'], net['l7_out_valid_flattened'], net['l4_features']
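    # Minimal sketch (illustrative only; this is NOT the project's generate_masks from
    # general_tools) of the kind of float32 mask the LSTM layers above expect: one row per
    # utterance, 1.0 for real frames and 0.0 for padding, so padded time steps do not
    # contribute to the recurrence.
    def _make_simple_masks(self, seq_lengths, max_len):
        masks = np.zeros((len(seq_lengths), max_len), dtype=theano.config.floatX)
        for i, length in enumerate(seq_lengths):
            masks[i, :length] = 1.0
        return masks  # e.g. _make_simple_masks([3, 2], 4) -> [[1,1,1,0],[1,1,0,0]]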
# network from Oxford & Google BBC paper
def build_google_CNN(self, input=None, activation=T.nnet.relu, alpha=0.1, epsilon=1e-4):
input = self.CNN_input_var
nbClasses = self.num_output_units
cnnDict = {}
# input
# store each layer of the network in a dict, for quickly retrieving any layer
cnnDict['l0_in'] = lasagne.layers.InputLayer(
shape=(None, 1, 120, 120), # 5,120,120 (5 = #frames)
input_var=input)
cnnDict['l1_conv1'] = []
cnnDict['l1_conv1'].append(lasagne.layers.Conv2DLayer(
cnnDict['l0_in'],
num_filters=128,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity))
cnnDict['l1_conv1'].append(lasagne.layers.MaxPool2DLayer(cnnDict['l1_conv1'][-1], pool_size=(2, 2)))
cnnDict['l1_conv1'].append(lasagne.layers.BatchNormLayer(
cnnDict['l1_conv1'][-1],
epsilon=epsilon,
alpha=alpha))
cnnDict['l1_conv1'].append(lasagne.layers.NonlinearityLayer(
cnnDict['l1_conv1'][-1],
nonlinearity=activation))
# conv 2
cnnDict['l2_conv2'] = []
cnnDict['l2_conv2'].append(lasagne.layers.Conv2DLayer(
cnnDict['l1_conv1'][-1],
num_filters=256,
filter_size=(3, 3),
stride=(2, 2),
pad=1,
nonlinearity=lasagne.nonlinearities.identity))
cnnDict['l2_conv2'].append(lasagne.layers.MaxPool2DLayer(cnnDict['l2_conv2'][-1], pool_size=(2, 2)))
cnnDict['l2_conv2'].append(lasagne.layers.BatchNormLayer(
cnnDict['l2_conv2'][-1],
epsilon=epsilon,
alpha=alpha))
cnnDict['l2_conv2'].append(lasagne.layers.NonlinearityLayer(
cnnDict['l2_conv2'][-1],
nonlinearity=activation))
# conv3
cnnDict['l3_conv3'] = []
cnnDict['l3_conv3'].append(lasagne.layers.Conv2DLayer(
cnnDict['l2_conv2'][-1],
num_filters=512,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity))
cnnDict['l3_conv3'].append(lasagne.layers.NonlinearityLayer(
cnnDict['l3_conv3'][-1],
nonlinearity=activation))
# conv 4
cnnDict['l4_conv4'] = []
cnnDict['l4_conv4'].append(lasagne.layers.Conv2DLayer(
cnnDict['l3_conv3'][-1],
num_filters=512,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity))
cnnDict['l4_conv4'].append(lasagne.layers.NonlinearityLayer(
cnnDict['l4_conv4'][-1],
nonlinearity=activation))
# conv 5
cnnDict['l5_conv5'] = []
cnnDict['l5_conv5'].append(lasagne.layers.Conv2DLayer(
cnnDict['l4_conv4'][-1],
num_filters=512,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity))
cnnDict['l5_conv5'].append(lasagne.layers.MaxPool2DLayer(
cnnDict['l5_conv5'][-1],
pool_size=(2, 2)))
cnnDict['l5_conv5'].append(lasagne.layers.NonlinearityLayer(
cnnDict['l5_conv5'][-1],
nonlinearity=activation))
# now we have output shape (nbValidFrames, 512,7,7) -> Flatten it.
batch_size = cnnDict['l0_in'].input_var.shape[0]
cnnDict['l6_reshape'] = L.ReshapeLayer(cnnDict['l5_conv5'][-1], (batch_size, 25088))
# # conv 6
# cnnDict['l6_conv6'] = []
# cnnDict['l6_conv6'].append(lasagne.layers.Conv2DLayer(
# cnnDict['l5_conv5'][-1],
# num_filters=128,
# filter_size=(3, 3),
# pad=1,
# nonlinearity=lasagne.nonlinearities.identity))
# cnnDict['l6_conv6'].append(lasagne.layers.MaxPool2DLayer(
# cnnDict['l6_conv6'][-1],
# pool_size=(2, 2)))
# cnnDict['l6_conv6'].append(lasagne.layers.NonlinearityLayer(
# cnnDict['l6_conv6'][-1],
# nonlinearity=activation))
# # this will output shape (nbValidFrames, 512,7,7). Flatten it.
# batch_size = cnnDict['l0_in'].input_var.shape[0]
# cnnDict['l6_reshape'] = L.ReshapeLayer(cnnDict['l6_conv6'][-1], (batch_size, 25088))
# disable this layer for normal phoneme recognition
# FC layer
# cnnDict['l6_fc'] = []
# cnnDict['l6_fc'].append(lasagne.layers.DenseLayer(
# cnnDict['l5_conv5'][-1],
# nonlinearity=lasagne.nonlinearities.identity,
# num_units=256))
#
# cnnDict['l6_fc'].append(lasagne.layers.NonlinearityLayer(
# cnnDict['l6_fc'][-1],
# nonlinearity=activation))
cnnDict['l7_out'] = lasagne.layers.DenseLayer(
cnnDict['l5_conv5'][-1],
nonlinearity=lasagne.nonlinearities.softmax,
num_units=nbClasses)
# cnn = lasagne.layers.BatchNormLayer(
# cnn,
# epsilon=epsilon,
# alpha=alpha)
return cnnDict, cnnDict['l7_out'], cnnDict['l6_reshape']
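    # Shape sanity check (an illustrative helper, assuming the 'google' CNN above was built in
    # __init__): a 120x120 mouth crop goes 120 -> pool/2 -> 60 -> stride-2 conv -> 30 -> pool/2
    # -> 15 -> pool/2 -> 7, so the last conv stack is (None, 512, 7, 7) and l6_reshape flattens
    # it to the (None, 25088) feature vector mentioned in the comments above.
    def _google_CNN_shape_check(self):
        conv_shape = lasagne.layers.get_output_shape(self.CNN_dict['l5_conv5'][-1])
        feat_shape = lasagne.layers.get_output_shape(self.CNN_dict['l6_reshape'])
        return conv_shape, feat_shape  # ((None, 512, 7, 7), (None, 25088))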
def build_google_binary_CNN(self, input=None, activation=T.nnet.relu, alpha=0.1, epsilon=1e-4):
alpha = .1
epsilon = 1e-4
activation = binary_net.binary_tanh_unit
binary = True
stochastic = False
H = 1.
W_LR_scale = "Glorot"
# Resnet stuff
def build_resnet50_CNN(self, input=None, activation=T.nnet.relu, alpha=0.1, epsilon=1e-4):
input = self.CNN_input_var
nbClasses = self.num_output_units
from lasagne.layers import BatchNormLayer, Conv2DLayer as ConvLayer, DenseLayer, ElemwiseSumLayer, InputLayer, \
NonlinearityLayer, Pool2DLayer as PoolLayer
from lasagne.nonlinearities import rectify, softmax
def build_simple_block(incoming_layer, names,
num_filters, filter_size, stride, pad,
use_bias=False, nonlin=rectify):
"""Creates stacked Lasagne layers ConvLayer -> BN -> (ReLu)
Parameters:
----------
incoming_layer : instance of Lasagne layer
Parent layer
names : list of string
Names of the layers in block
num_filters : int
Number of filters in convolution layer
filter_size : int
Size of filters in convolution layer
stride : int
Stride of convolution layer
pad : int
Padding of convolution layer
use_bias : bool
                Whether to use bias in convolution layer
nonlin : function
Nonlinearity type of Nonlinearity layer
Returns
-------
tuple: (net, last_layer_name)
net : dict
Dictionary with stacked layers
last_layer_name : string
Last layer name
"""
net = []
net.append((
names[0],
ConvLayer(incoming_layer, num_filters, filter_size, pad, stride,
flip_filters=False, nonlinearity=None) if use_bias
else ConvLayer(incoming_layer, num_filters, filter_size, stride, pad, b=None,
flip_filters=False, nonlinearity=None)
))
net.append((
names[1],
BatchNormLayer(net[-1][1])
))
if nonlin is not None:
net.append((
names[2],
NonlinearityLayer(net[-1][1], nonlinearity=nonlin)
))
return dict(net), net[-1][0]
def build_residual_block(incoming_layer, ratio_n_filter=1.0, ratio_size=1.0, has_left_branch=False,
upscale_factor=4, ix=''):
"""Creates two-branch residual block
Parameters:
----------
incoming_layer : instance of Lasagne layer
Parent layer
ratio_n_filter : float
Scale factor of filter bank at the input of residual block
ratio_size : float
Scale factor of filter size
has_left_branch : bool
if True, then left branch contains simple block
upscale_factor : float
Scale factor of filter bank at the output of residual block
ix : int
Id of residual block
Returns
-------
tuple: (net, last_layer_name)
net : dict
Dictionary with stacked layers
last_layer_name : string
Last layer name
"""
simple_block_name_pattern = ['res%s_branch%i%s', 'bn%s_branch%i%s', 'res%s_branch%i%s_relu']
net = {}
# right branch
net_tmp, last_layer_name = build_simple_block(
incoming_layer, map(lambda s: s % (ix, 2, 'a'), simple_block_name_pattern),
int(lasagne.layers.get_output_shape(incoming_layer)[1] * ratio_n_filter), 1, int(1.0 / ratio_size),
0)
net.update(net_tmp)
net_tmp, last_layer_name = build_simple_block(
net[last_layer_name], map(lambda s: s % (ix, 2, 'b'), simple_block_name_pattern),
lasagne.layers.get_output_shape(net[last_layer_name])[1], 3, 1, 1)
net.update(net_tmp)
net_tmp, last_layer_name = build_simple_block(
net[last_layer_name], map(lambda s: s % (ix, 2, 'c'), simple_block_name_pattern),
lasagne.layers.get_output_shape(net[last_layer_name])[1] * upscale_factor, 1, 1, 0,
nonlin=None)
net.update(net_tmp)
right_tail = net[last_layer_name]
left_tail = incoming_layer
# left branch
if has_left_branch:
net_tmp, last_layer_name = build_simple_block(
incoming_layer, map(lambda s: s % (ix, 1, ''), simple_block_name_pattern),
int(lasagne.layers.get_output_shape(incoming_layer)[1] * 4 * ratio_n_filter), 1,
int(1.0 / ratio_size),
0,
nonlin=None)
net.update(net_tmp)
left_tail = net[last_layer_name]
net['res%s' % ix] = ElemwiseSumLayer([left_tail, right_tail], coeffs=1)
net['res%s_relu' % ix] = NonlinearityLayer(net['res%s' % ix], nonlinearity=rectify)
return net, 'res%s_relu' % ix
net = {}
net['input'] = InputLayer(shape=(None, 1, 120, 120), input_var=input)
sub_net, parent_layer_name = build_simple_block(
net['input'], ['conv1', 'bn_conv1', 'conv1_relu'],
64, 7, 3, 2, use_bias=True)
net.update(sub_net)
net['pool1'] = PoolLayer(net[parent_layer_name], pool_size=3, stride=2, pad=0, mode='max', ignore_border=False)
block_size = list('abc')
parent_layer_name = 'pool1'
for c in block_size:
if c == 'a':
sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1, 1, True, 4, ix='2%s' % c)
else:
sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0 / 4, 1, False, 4,
ix='2%s' % c)
net.update(sub_net)
block_size = list('abcd')
for c in block_size:
if c == 'a':
sub_net, parent_layer_name = build_residual_block(
net[parent_layer_name], 1.0 / 2, 1.0 / 2, True, 4, ix='3%s' % c)
else:
sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0 / 4, 1, False, 4,
ix='3%s' % c)
net.update(sub_net)
block_size = list('abcdef')
for c in block_size:
if c == 'a':
sub_net, parent_layer_name = build_residual_block(
net[parent_layer_name], 1.0 / 2, 1.0 / 2, True, 4, ix='4%s' % c)
else:
sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0 / 4, 1, False, 4,
ix='4%s' % c)
net.update(sub_net)
block_size = list('abc')
for c in block_size:
if c == 'a':
sub_net, parent_layer_name = build_residual_block(
net[parent_layer_name], 1.0 / 2, 1.0 / 2, True, 4, ix='5%s' % c)
else:
sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0 / 4, 1, False, 4,
ix='5%s' % c)
net.update(sub_net)
net['pool5'] = PoolLayer(net[parent_layer_name], pool_size=7, stride=1, pad=0,
mode='average_exc_pad', ignore_border=False)
net['fc1000'] = DenseLayer(net['pool5'], num_units=nbClasses,
nonlinearity=None) # number output units = nbClasses (global variable)
net['prob'] = NonlinearityLayer(net['fc1000'], nonlinearity=softmax)
# now we have output shape (nbValidFrames, 2048,1,1) -> Flatten it.
batch_size = net['input'].input_var.shape[0]
cnn_reshape = L.ReshapeLayer(net['pool5'], (batch_size, 2048))
return net, net['prob'], cnn_reshape
def build_cifar10_CNN_v2(self, input=None, nbClasses=39):
from lasagne.layers import BatchNormLayer, Conv2DLayer as ConvLayer, DenseLayer, ElemwiseSumLayer, InputLayer, \
NonlinearityLayer, Pool2DLayer as PoolLayer, DropoutLayer
from lasagne.nonlinearities import rectify, softmax
input = self.CNN_input_var
nbClasses = self.num_output_units
net = {}
net['input'] = InputLayer((None, 1, 120, 120), input_var=input)
net['conv1'] = ConvLayer(net['input'],
num_filters=192,
filter_size=5,
pad=2,
flip_filters=False)
net['cccp1'] = ConvLayer(
net['conv1'], num_filters=160, filter_size=1, flip_filters=False)
net['cccp2'] = ConvLayer(
net['cccp1'], num_filters=96, filter_size=1, flip_filters=False)
net['pool1'] = PoolLayer(net['cccp2'],
pool_size=3,
stride=2,
mode='max',
ignore_border=False)
net['drop3'] = DropoutLayer(net['pool1'], p=0.5)
net['conv2'] = ConvLayer(net['drop3'],
num_filters=192,
filter_size=5,
pad=2,
flip_filters=False)
net['cccp3'] = ConvLayer(
net['conv2'], num_filters=192, filter_size=1, flip_filters=False)
net['cccp4'] = ConvLayer(
net['cccp3'], num_filters=192, filter_size=1, flip_filters=False)
net['pool2'] = PoolLayer(net['cccp4'],
pool_size=3,
stride=2,
mode='average_exc_pad',
ignore_border=False)
net['drop6'] = DropoutLayer(net['pool2'], p=0.5)
net['conv3'] = ConvLayer(net['drop6'],
num_filters=192,
filter_size=3,
pad=1,
flip_filters=False)
net['cccp5'] = ConvLayer(
net['conv3'], num_filters=192, filter_size=1, flip_filters=False)
net['cccp6'] = ConvLayer(
net['cccp5'], num_filters=10, filter_size=1, flip_filters=False)
net['pool3'] = PoolLayer(net['cccp6'],
pool_size=8,
mode='average_exc_pad',
ignore_border=False)
# net['output'] = FlattenLayer(net['pool3'])
# now we have output shape (nbValidFrames, 10,4,4) -> Flatten it.
batch_size = net['input'].input_var.shape[0]
cnn_reshape = L.ReshapeLayer(net['pool3'], (batch_size, 160))
net['dense1'] = lasagne.layers.DenseLayer(
net['pool3'],
nonlinearity=lasagne.nonlinearities.identity,
num_units=1024)
net['output'] = lasagne.layers.DenseLayer(
net['dense1'],
nonlinearity=lasagne.nonlinearities.softmax,
num_units=nbClasses)
return net, net['output'], cnn_reshape
def build_cifar10_CNN(self, input=None, activation=T.nnet.relu, alpha=0.1, epsilon=1e-4):
input = self.CNN_input_var
nbClasses = self.num_output_units
cnn_in = lasagne.layers.InputLayer(
shape=(None, 1, 120, 120),
input_var=input)
# 128C3-128C3-P2
cnn = lasagne.layers.Conv2DLayer(
cnn_in,
num_filters=128,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity)
cnn = lasagne.layers.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha)
cnn = lasagne.layers.NonlinearityLayer(
cnn,
nonlinearity=activation)
cnn = lasagne.layers.Conv2DLayer(
cnn,
num_filters=128,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity)
cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))
cnn = lasagne.layers.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha)
cnn = lasagne.layers.NonlinearityLayer(
cnn,
nonlinearity=activation)
# 256C3-256C3-P2
cnn = lasagne.layers.Conv2DLayer(
cnn,
num_filters=256,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity)
cnn = lasagne.layers.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha)
cnn = lasagne.layers.NonlinearityLayer(
cnn,
nonlinearity=activation)
cnn = lasagne.layers.Conv2DLayer(
cnn,
num_filters=256,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity)
cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))
#
cnn = lasagne.layers.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha)
cnn = lasagne.layers.NonlinearityLayer(
cnn,
nonlinearity=activation)
#
# 512C3-512C3-P2
cnn = lasagne.layers.Conv2DLayer(
cnn,
num_filters=512,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity)
cnn = lasagne.layers.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha)
cnn = lasagne.layers.NonlinearityLayer(
cnn,
nonlinearity=activation)
#
cnn = lasagne.layers.Conv2DLayer(
cnn,
num_filters=512,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity)
cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))
cnn = lasagne.layers.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha)
cnn = lasagne.layers.NonlinearityLayer(
cnn,
nonlinearity=activation)
# print(cnn.output_shape)
# now we have output shape (nbValidFrames, 512,15,15) -> Flatten it.
batch_size = cnn_in.input_var.shape[0]
cnn_reshape = L.ReshapeLayer(cnn, (batch_size, 115200))
cnn = lasagne.layers.DenseLayer(
cnn,
nonlinearity=lasagne.nonlinearities.identity,
num_units=256)
#
cnn = lasagne.layers.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha)
cnn = lasagne.layers.NonlinearityLayer(
cnn,
nonlinearity=activation)
cnn = lasagne.layers.DenseLayer(
cnn,
nonlinearity=lasagne.nonlinearities.softmax,
num_units=nbClasses)
return {}, cnn, cnn_reshape
def build_lipreadingRNN(self, input, n_hidden_list=(100,), bidirectional=False, debug=False, logger=logger_combinedtools):
net = {}
#CNN output: (time_seq, features)
# LSTM need (batch_size, time_seq, features). Batch_size = # videos processed in parallel = 1
nbFeatures = input.output_shape[1]
net['l1_in']= L.ReshapeLayer(input, (1, -1, nbFeatures))# 39 or 25088 (with dense softmax or direct conv outputs)
if debug:
n_batch, n_time_steps, n_features = net['l1_in'].output_shape
logger.debug(" n_batch: %s | n_time_steps: %s | n_features: %s", n_batch, n_time_steps, n_features)
## LSTM parameters
# All gates have initializers for the input-to-gate and hidden state-to-gate
# weight matrices, the cell-to-gate weight vector, the bias vector, and the nonlinearity.
# The convention is that gates use the standard sigmoid nonlinearity,
# which is the default for the Gate class.
gate_parameters = L.recurrent.Gate(
W_in=lasagne.init.Orthogonal(), W_hid=lasagne.init.Orthogonal(),
b=lasagne.init.Constant(0.))
cell_parameters = L.recurrent.Gate(
W_in=lasagne.init.Orthogonal(), W_hid=lasagne.init.Orthogonal(),
# Setting W_cell to None denotes that no cell connection will be used.
W_cell=None, b=lasagne.init.Constant(0.),
# By convention, the cell nonlinearity is tanh in an LSTM.
nonlinearity=lasagne.nonlinearities.tanh)
# generate layers of stacked LSTMs, possibly bidirectional
net['l2_lstm'] = []
for i in range(len(n_hidden_list)):
n_hidden = n_hidden_list[i]
if i == 0:
input = net['l1_in']
else:
input = net['l2_lstm'][i - 1]
nextForwardLSTMLayer = L.recurrent.LSTMLayer(
incoming=input, num_units=n_hidden,
# Here, we supply the gate parameters for each gate
ingate=gate_parameters, forgetgate=gate_parameters,
cell=cell_parameters, outgate=gate_parameters,
# We'll learn the initialization and use gradient clipping
learn_init=True, grad_clipping=100.)
net['l2_lstm'].append(nextForwardLSTMLayer)
if bidirectional:
input = net['l2_lstm'][-1]
# Use backward LSTM
# The "backwards" layer is the same as the first,
# except that the backwards argument is set to True.
nextBackwardLSTMLayer = L.recurrent.LSTMLayer(
input, n_hidden, ingate=gate_parameters,
forgetgate=gate_parameters,
cell=cell_parameters, outgate=gate_parameters,
learn_init=True, grad_clipping=100., backwards=True)
net['l2_lstm'].append(nextBackwardLSTMLayer)
# The output of l_sum will be of shape (n_batch, max_n_time_steps, n_features)
net['l2_lstm'].append(L.ElemwiseSumLayer([net['l2_lstm'][-2], net['l2_lstm'][-1]]))
# we need to convert (batch_size, seq_length, num_features) to (batch_size * seq_length, num_features) because Dense networks can't deal with 2 unknown sizes
net['l3_reshape'] = L.ReshapeLayer(net['l2_lstm'][-1], (-1, n_hidden_list[-1]))
# print(L.count_params(net['l1_in']))
# lstmParams = L.count_params(net['l2_lstm']) - L.count_params(net['l1_in'])
# print(lstmParams)
# if lstmParams > 6000000:
# print([L.count_params(net['l2_lstm'][i]) - L.count_params(net['l2_lstm'][i - 1]) for i in range(1, len(net['l2_lstm']))])
# print([L.count_params(net['l2_lstm'][i]) - L.count_params(net['l2_lstm'][i - 1]) for i in
# range(1, len(net['l2_lstm']))])
# import pdb;pdb.set_trace()
if debug:
self.print_RNN_network_structure(net)
return net, net['l3_reshape'] #output shape: (nbFrames, nbHiddenLSTMunits)
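    # Minimal shape sketch (illustrative only, not called by the pipeline) of the reshape that
    # feeds the lipreading LSTM: the CNN emits one feature vector per valid frame, shape
    # (nbFrames, nbFeatures); wrapping that as a single 'video batch' of shape
    # (1, nbFrames, nbFeatures) lets the LSTM layers scan it over time, as l1_in does above.
    def _lipreadingRNN_input_shape_demo(self, nbFrames=7, nbFeatures=39):
        l_in = L.InputLayer((nbFrames, nbFeatures))
        l_seq = L.ReshapeLayer(l_in, (1, -1, nbFeatures))
        return L.get_output_shape(l_seq)  # (1, nbFrames, nbFeatures)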
def build_softmax(self, inputLayer, nbClasses=39):
softmaxLayer = lasagne.layers.DenseLayer(
inputLayer,
nonlinearity=lasagne.nonlinearities.softmax,
num_units=nbClasses)
return softmaxLayer
def build_combined(self, lipreading_lout, audio_lout, dense_hidden_list, combinationType ='FF', debug=False):
# (we process one video at a time)
# CNN_lout and RNN_lout should be shaped (batch_size, nbFeatures) with batch_size = nb_valid_frames in this video
        # for CNN_lout: nbFeatures = 512x7x7 = 25,088
# for RNN_lout: nbFeatures = nbUnits(last LSTM layer)
combinedNet = {}
if combinationType == 'attention':
# using attention network from https://github.com/craffel/ff-attention/b
HIDDEN_SIZE = 256 #by default
lip_n_features = lipreading_lout.output_shape[-1]
audio_n_features = audio_lout.output_shape[-1]
            assert lip_n_features == audio_n_features  # the reshape/concat a few lines below requires both modalities to have the same feature size
lip_reshaped = L.ReshapeLayer(lipreading_lout, (-1, 1, lip_n_features))
audio_reshaped = L.ReshapeLayer(audio_lout, (-1, 1, audio_n_features))
combinedNet['l_concat'] = L.ConcatLayer([lip_reshaped, audio_reshaped], axis=2)
combinedNet['l_concat'] = L.ReshapeLayer(combinedNet['l_concat'],(-1,2,lip_n_features))
print(combinedNet['l_concat'].output_shape)
# shape: (nb_seq, 2, nb_features)
# Construct network
time_step, input_mode, n_features = combinedNet['l_concat'].output_shape
# Store a dictionary which conveniently maps names to layers we will need to access later
print(time_step, input_mode, n_features)
# Add dense input layer -> reshape to number of features of input layer
layer = lasagne.layers.ReshapeLayer(
combinedNet['l_concat'], (-1, n_features), name='Reshape 1')
layer = lasagne.layers.DenseLayer(
layer, HIDDEN_SIZE, W=lasagne.init.HeNormal(), name='Input dense',
nonlinearity=lasagne.nonlinearities.leaky_rectify)
layer = lasagne.layers.ReshapeLayer(
layer, (-1, input_mode, HIDDEN_SIZE), name='Reshape 2')
print("attention input: ", layer.output_shape)
# Add the ATTENTION layer to aggregate over the different input modes (lipreading and audio)
# A layer which computes a weighted average across the second dimension of
# its input, where the weights are computed according to the third dimension.
# This results in the second dimension being flattened. This is an attention
# mechanism - which "steps" (in the second dimension) are attended to is
# determined by a learned transform of the features.
layer = AttentionLayer(
layer,
W=lasagne.init.Normal(1. / np.sqrt(layer.output_shape[-1])),
name='Attention')
print("attention output: ", layer.output_shape)
            # TODO: add more dense layers after the attention network -> better performance? (512,512,512)
# Add dense hidden layer
layer = lasagne.layers.DenseLayer(
layer, HIDDEN_SIZE, W=lasagne.init.HeNormal(), name='Out dense 1',
nonlinearity=lasagne.nonlinearities.leaky_rectify)
            # Add final dense softmax layer over the output phoneme classes
layer = lasagne.layers.DenseLayer(
layer, num_units=self.num_output_units, W=lasagne.init.HeNormal(), name='Out dense 2',
nonlinearity=lasagne.nonlinearities.softmax)
# Keep track of the final layer
combinedNet['l_out'] = layer
else: #simple dense combination
combinedNet['l_concat'] = L.ConcatLayer([lipreading_lout, audio_lout], axis=1)
if debug:
logger_combinedtools.debug("CNN output shape: %s", lipreading_lout.output_shape)
logger_combinedtools.debug("RNN output shape: %s", audio_lout.output_shape)
import pdb;pdb.set_trace()
combinedNet['l_dense'] = []
for i in range(len(dense_hidden_list)):
n_hidden = dense_hidden_list[i]
if i == 0:
input = combinedNet['l_concat']
else:
input = combinedNet['l_dense'][i - 1]
nextDenseLayer = L.DenseLayer(input,
nonlinearity=lasagne.nonlinearities.rectify,
num_units=n_hidden)
#nextDenseLayer = L.DropoutLayer(nextDenseLayer, p=0.3)# TODO does dropout work?
combinedNet['l_dense'].append(nextDenseLayer)
# final softmax layer
if len(combinedNet['l_dense']) == 0: #if no hidden layers
combinedNet['l_out'] = L.DenseLayer(combinedNet['l_concat'], num_units=self.num_output_units,
nonlinearity=lasagne.nonlinearities.softmax)
else:
combinedNet['l_out'] = L.DenseLayer(combinedNet['l_dense'][-1], num_units=self.num_output_units,
nonlinearity=lasagne.nonlinearities.softmax)
return combinedNet, combinedNet['l_out']
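    # Minimal shape sketch (an illustrative helper, not used by the pipeline) of the dense
    # ('FC') combination path above: per video, the lip and audio feature vectors of each
    # valid frame are concatenated along the feature axis and pushed through the dense stack
    # to a 39-way phoneme softmax. The feature sizes below are example values only.
    def _combined_dense_shapes_demo(self, n_lip=39, n_audio=256, dense_hidden_list=(512,), n_classes=39):
        l_lip = L.InputLayer((None, n_lip))
        l_audio = L.InputLayer((None, n_audio))
        l = L.ConcatLayer([l_lip, l_audio], axis=1)  # (nbValidFrames, n_lip + n_audio)
        for n_hidden in dense_hidden_list:
            l = L.DenseLayer(l, num_units=n_hidden, nonlinearity=lasagne.nonlinearities.rectify)
        l_out = L.DenseLayer(l, num_units=n_classes, nonlinearity=lasagne.nonlinearities.softmax)
        return L.get_output_shape(l_out)  # (None, 39)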
def print_RNN_network_structure(self, net=None, logger=logger_combinedtools):
if net == None: net = self.audioNet_dict
logger.debug("\n PRINTING Audio RNN network: \n %s ", sorted(net.keys()))
for key in sorted(net.keys()):
if 'lstm' in key:
for layer in net['l2_lstm']:
try:
logger.debug(' %12s | in: %s | out: %s', key, layer.input_shape, layer.output_shape)
except:
logger.debug(' %12s | out: %s', key, layer.output_shape)
else:
try:
logger.debug(' %12s | in: %s | out: %s', key, net[key].input_shape, net[key].output_shape)
except:
logger.debug(' %12s | out: %s', key, net[key].output_shape)
return 0
def print_CNN_network_structure(self, net=None, logger=logger_combinedtools):
if net == None:
cnnDict = self.CNN_dict
else:
cnnDict = net
print("\n PRINTING image CNN structure: \n %s " % (sorted(cnnDict.keys())))
for key in sorted(cnnDict.keys()):
print(key)
if 'conv' in key and type(cnnDict[key]) == list:
for layer in cnnDict[key]:
try:
print(' %12s \nin: %s | out: %s' % (layer, layer.input_shape, layer.output_shape))
except:
print(' %12s \nout: %s' % (layer, layer.output_shape))
else:
try:
print(' %12s \nin: %s | out: %s' % (
cnnDict[key], cnnDict[key].input_shape, cnnDict[key].output_shape))
except:
print(' %12s \nout: %s' % (cnnDict[key], cnnDict[key].output_shape))
return 0
def getParamsInfo(self):
# print number of parameters
nb_CNN_features = lasagne.layers.count_params(self.CNN_lout_features)
nb_CNN = lasagne.layers.count_params(self.CNN_lout)
nb_lipreading_features = lasagne.layers.count_params(self.lipreading_lout_features)
nb_lipreading = L.count_params(self.lipreading_lout)
nb_audio_features = lasagne.layers.count_params(self.audioNet_lout_features)
nb_audio = lasagne.layers.count_params(self.audioNet_lout)
nb_total = lasagne.layers.count_params(self.combined_lout)
if self.lipreadingType == 'CNN_LSTM': #features is then the output of the LSTM on top of CNN, so contains all the lipreading params
if self.cnn_features == 'conv':
nb_CNN_used = nb_CNN_features
else: nb_CNN_used = nb_CNN
nb_lipRNN = nb_lipreading - nb_CNN_used
else:
nb_CNN_used = nb_lipreading_features
nb_lipRNN = 0
nb_combining = nb_total - nb_lipreading_features - nb_audio
nb_params = {}
nb_params['nb_lipreading'] = nb_lipreading
nb_params['nb_audio'] = nb_audio
nb_params['nb_total'] = nb_total
nb_params['nb_audio_features'] = nb_audio_features
nb_params['nb_lipreading_features'] = nb_lipreading_features
nb_params['nb_CNN_used'] = nb_CNN_used
nb_params['nb_lipRNN'] = nb_lipRNN
nb_params['nb_combining'] = nb_combining
return nb_params
# return True if successful load, false otherwise
def load_model(self, model_type, roundParams=False, logger=logger_combinedtools):
if not os.path.exists(self.model_paths[model_type]):
#logger.warning("WARNING: Loading %s Failed. \n path: %s", model_type, self.model_paths[model_type])
return False
# restore network weights
with np.load(self.model_paths[model_type]) as f:
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
if len(param_values) == 1: param_values = param_values[0]
if model_type == 'audio':
lout = self.audioNet_lout
elif model_type == 'CNN':
lout = self.CNN_lout
elif model_type == 'CNN_LSTM':
lout = self.lipreading_lout
elif model_type == 'combined':
lout = self.combined_lout
else:
logger.error('Wrong network type. No weights loaded')#.format(model_type))
return False
try:
if roundParams: lasagne.layers.set_all_param_values(lout, self.round_params(param_values))
else:
#print(len(param_values));import pdb;pdb.set_trace();
lasagne.layers.set_all_param_values(lout, param_values)
except:
logger.warning('Warning: %s', traceback.format_exc()) # , model_path)
import pdb;pdb.set_trace()
logger.info("Loading %s parameters successful.", model_type)
return True
def round_params(self, param_values):
for i in range(len(param_values)):
param_values[i] = param_values[i].astype(np.float16)
param_values[i] = param_values[i].astype(np.float32)
return param_values
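    # Tiny numeric illustration (not used anywhere) of what round_params does: the float16
    # round-trip keeps roughly three significant decimal digits and flushes values smaller
    # than ~6e-8 to zero, which is how the roundParams option compresses stored weights.
    def _round_params_demo(self):
        w = [np.array([0.123456789, 1e-8], dtype=np.float32)]
        return self.round_params(w)  # roughly [array([0.1235, 0.0], dtype=float32)]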
    # set as many network parameters as possible by hierarchical loading of subnetworks,
    # e.g. for combined: if no trained combined network is found, try to load the audio and lipreading subnets
def setNetworkParams(self, runType, overwriteSubnets=False, roundParams=False, logger=logger_combinedtools):
if runType == 'combined':
logger.info("\nAttempting to load combined model: %s", self.model_paths['combined'])
success = self.load_model(model_type='combined', roundParams=roundParams)
if (not success) or overwriteSubnets:
if not success: logger.warning("No complete combined network found, loading parts...")
else: logger.warning("Overwrite subnets = True, overwriting...")
logger.info("CNN : %s", self.model_paths['CNN'])
self.load_model(model_type='CNN', roundParams=roundParams)
if self.lipreadingType == 'CNN_LSTM': # LIP_RNN_HIDDEN_LIST != None:
logger.info("CNN_LSTM : %s", self.model_paths['CNN_LSTM'])
self.load_model(model_type='CNN_LSTM', roundParams=roundParams)
logger.info("Audio : %s", self.model_paths['audio'])
self.load_model(model_type='audio', roundParams=roundParams)
elif runType == 'lipreading':
if self.lipreadingType == 'CNN_LSTM':
logger.info("\nAttempting to load lipreading CNN_LSTM model: %s",
self.model_paths['CNN_LSTM'])
                # try to load CNN_LSTM; if that fails, just load the CNN so the LSTM can be trained on top of it
success = self.load_model(model_type='CNN_LSTM', roundParams=roundParams)
if not success:
logger.warning("No complete CNN_LSTM network found, loading parts...")
self.load_model(model_type='CNN', roundParams=roundParams)
else:
logger.info("\nAttempting to load lipreading CNN model: %s", self.model_paths['CNN'])
success = self.load_model(model_type='CNN', roundParams=roundParams)
else: ## runType == 'audio':
logger.info("\nAttempting to load audio model: %s",
self.model_paths['audio'])
success = self.load_model(model_type='audio', roundParams=roundParams)
return success
def save_model(self, model_name, logger=logger_combinedtools):
if not os.path.exists(os.path.dirname(model_name)):
os.makedirs(os.path.dirname(model_name))
np.savez(model_name + '.npz', self.best_param)
def build_functions(self, runType, train=False, allowSubnetTraining=False, debug=False, logger=logger_combinedtools):
k = 3; # top k accuracy
##########################
## For Lipreading part ##
##########################
if runType == 'lipreading':
            # Targets are 2D for the LSTM, but the CNN needs only 1D -> flatten them everywhere
#import pdb;pdb.set_trace()
# For information: only CNN classification, with softmax to 39 phonemes
CNN_test_network_output = L.get_output(self.CNN_lout, deterministic=True)
CNN_test_loss = LO.categorical_crossentropy(CNN_test_network_output, self.targets_var.flatten());
CNN_test_loss = CNN_test_loss.mean()
CNN_test_acc = T.mean(T.eq(T.argmax(CNN_test_network_output, axis=1), self.targets_var.flatten()),
dtype=theano.config.floatX)
CNN_top3_acc = T.mean(lasagne.objectives.categorical_accuracy(CNN_test_network_output, self.targets_var.flatten(), top_k=k))
self.CNN_val_fn = theano.function([self.CNN_input_var, self.targets_var], [CNN_test_loss,
CNN_test_acc,
CNN_top3_acc])
# The whole lipreading network (different if CNN-LSTM architecture, otherwise same as CNN-softmax)
# for validation: disable dropout etc layers -> deterministic
lipreading_test_network_output = L.get_output(self.lipreading_lout, deterministic=True)
lipreading_preds = T.argmax(lipreading_test_network_output, axis=1) #prediction with maximum probability
#self.lipreading_predictions_fn = theano.function([self.CNN_input_var], lipreading_preds)
lipreading_test_acc = T.mean(T.eq(T.argmax(lipreading_test_network_output, axis=1), self.targets_var.flatten()),
dtype=theano.config.floatX)
lipreading_test_loss = LO.categorical_crossentropy(lipreading_test_network_output, self.targets_var.flatten());
lipreading_test_loss = lipreading_test_loss.mean()
# Top k accuracy
lipreading_top3_acc = T.mean(lasagne.objectives.categorical_accuracy(lipreading_test_network_output,
self.targets_var.flatten(), top_k=k))
self.lipreading_top3acc_fn = theano.function([self.CNN_input_var, self.targets_var], lipreading_top3_acc)
self.lipreading_val_fn = theano.function([self.CNN_input_var, self.targets_var], [lipreading_test_loss,
lipreading_test_acc,
lipreading_top3_acc])
self.lipreading_val_preds_fn = theano.function([self.CNN_input_var, self.targets_var],
[lipreading_test_loss,
lipreading_test_acc,
lipreading_top3_acc,
lipreading_preds])
if debug:
CNN_test_loss, CNN_test_acc, CNN_top3_acc = self.CNN_val_fn(self.images, self.validLabels)
logger.debug("\n\nCNN network only: \ntest loss: %s \n test acc: %s \n top3_acc: %s",
CNN_test_loss, CNN_test_acc*100.0, CNN_top3_acc*100.0)
lipreading_test_loss, lipreading_test_acc, lipreading_top3_acc = self.lipreading_val_fn(self.images, self.validLabels)
logger.debug("\n\n Lipreading network: \ntest loss: %s \n test acc: %s \n top3_acc: %s",
lipreading_test_loss, lipreading_test_acc * 100.0, lipreading_top3_acc * 100.0)
# For training, use nondeterministic output
lipreading_network_output = L.get_output(self.lipreading_lout, deterministic=False)
self.lipreading_out_fn = theano.function([self.CNN_input_var], lipreading_network_output)
# cross-entropy loss
lipreading_loss_pointwise = LO.categorical_crossentropy(lipreading_network_output, self.targets_var.flatten());
lipreading_loss = lasagne.objectives.aggregate(lipreading_loss_pointwise)
# lipreading_loss = lipreading_loss_pointwise.mean()
# set all params to trainable
lipreading_params = L.get_all_params(self.lipreading_lout, trainable=True)
if self.lipreadingType == 'CNN_LSTM': #only train the LSTM network, don't touch the CNN
if not allowSubnetTraining:
lipreading_params = list(set(lipreading_params) - set(L.get_all_params(self.CNN_lout, trainable=True)))
lipreading_updates = lasagne.updates.adam(loss_or_grads=lipreading_loss, params=lipreading_params, learning_rate=self.LR_var)
# Compile a function performing a training step on a mini-batch (by giving the updates dictionary)
# and returning the corresponding training loss:
self.lipreading_train_fn = theano.function([self.CNN_input_var, self.targets_var, self.LR_var], lipreading_loss, updates=lipreading_updates)
if debug:
output = self.lipreading_out_fn(self.images)
logger.debug(" lipreading output shape: %s", output.shape)
import pdb;pdb.set_trace()
####################
## For Audio Part ##
####################
if runType == 'audio':
# LSTM in lasagne: see https://github.com/craffel/Lasagne-tutorial/blob/master/examples/recurrent.py
# and also http://colinraffel.com/talks/hammer2015recurrent.pdf
if debug:
logger.debug("\n\n Audio Network")
self.print_RNN_network_structure()
# using the lasagne SliceLayer
audio_valid_network_output = L.get_output(self.audioNet_dict['l7_out_valid'])
self.audio_valid_network_output_fn = theano.function(
[self.audio_inputs_var, self.audio_masks_var, self.audio_valid_frames_var], audio_valid_network_output)
audio_valid_network_output_flattened = L.get_output(self.audioNet_lout_flattened)
self.audio_network_output_flattened_fn = theano.function(
[self.audio_inputs_var, self.audio_masks_var, self.audio_valid_frames_var],
audio_valid_network_output_flattened)
audio_valid_predictions = T.argmax(audio_valid_network_output_flattened, axis=1) # TODO axis 1 or 2?
self.audio_predictions_fn = theano.function(
[self.audio_inputs_var, self.audio_masks_var, self.audio_valid_frames_var],
audio_valid_predictions, name='valid_predictions_fn')
# top k accuracy
audio_top1_acc = T.mean(lasagne.objectives.categorical_accuracy(
audio_valid_network_output_flattened, self.targets_var.flatten(), top_k=1))
self.audio_top1_acc_fn = theano.function(
[self.audio_inputs_var, self.audio_masks_var, self.audio_valid_frames_var,
self.targets_var], audio_top1_acc)
audio_top3_acc = T.mean(lasagne.objectives.categorical_accuracy(
audio_valid_network_output_flattened, self.targets_var.flatten(), top_k=k))
self.audio_top3_acc_fn = theano.function(
[self.audio_inputs_var, self.audio_masks_var, self.audio_valid_frames_var,
self.targets_var], audio_top3_acc)
if debug:
try:
valid_out = self.audio_valid_network_output_fn(self.mfccs, self.masks, self.validAudioFrames)
logger.debug('valid_out.shape: %s', valid_out.shape)
# logger.debug('valid_out, value: \n%s', valid_out)
valid_out_flattened = self.audio_network_output_flattened_fn(self.mfccs, self.masks,
self.validAudioFrames)
logger.debug('valid_out_flat.shape: %s', valid_out_flattened.shape)
# logger.debug('valid_out_flat, value: \n%s', valid_out_flattened)
valid_preds2 = self.audio_predictions_fn(self.mfccs, self.masks, self.validAudioFrames)
logger.debug('valid_preds2.shape: %s', valid_preds2.shape)
# logger.debug('valid_preds2, value: \n%s', valid_preds2)
logger.debug('validAudioFrames.shape: %s', self.validAudioFrames.shape)
logger.debug('valid_targets.shape: %s', self.validLabels.shape)
logger.debug('valid_targets, value: %s', self.validLabels)
top1 = self.audio_top1_acc_fn(self.mfccs, self.masks, self.validAudioFrames, self.validLabels)
logger.debug("top 1 accuracy: %s", top1 * 100.0)
top3 = self.audio_top3_acc_fn(self.mfccs, self.masks, self.validAudioFrames, self.validLabels)
logger.debug("top 3 accuracy: %s", top3 * 100.0)
except Exception as error:
print('caught this error: ' + traceback.format_exc());
import pdb; pdb.set_trace()
# with Lasagne SliceLayer outputs:
audio_cost_pointwise = lasagne.objectives.categorical_crossentropy(audio_valid_network_output_flattened,
self.targets_var.flatten())
audio_cost = lasagne.objectives.aggregate(audio_cost_pointwise)
# Functions for computing cost and training
self.audio_val_fn = theano.function(
[self.audio_inputs_var, self.audio_masks_var, self.audio_valid_frames_var, self.targets_var],
[audio_cost, audio_top1_acc, audio_top3_acc], name='validate_fn')
self.audio_val_preds_fn = theano.function(
[self.audio_inputs_var, self.audio_masks_var, self.audio_valid_frames_var, self.targets_var],
[audio_cost, audio_top1_acc, audio_top3_acc, audio_valid_predictions], name='validate_fn')
if debug:
self.audio_cost_pointwise_fn = theano.function([self.audio_inputs_var, self.audio_masks_var,
self.audio_valid_frames_var, self.targets_var],
audio_cost_pointwise, name='cost_pointwise_fn')
# logger.debug('cost pointwise: %s',
# self.audio_cost_pointwise_fn(self.mfccs, self.masks, self.validAudioFrames, self.validLabels))
evaluate_cost = self.audio_val_fn(self.mfccs, self.masks, self.validAudioFrames, self.validLabels)
logger.debug('cost: {:.3f}'.format(float(evaluate_cost[0])))
logger.debug('accuracy: {:.3f} %'.format(float(evaluate_cost[1]) * 100))
logger.debug('Top 3 accuracy: {:.3f} %'.format(float(evaluate_cost[2]) * 100))
# pdb.set_trace()
# Retrieve all trainable parameters from the network
audio_params = L.get_all_params(self.audioNet_lout, trainable=True)
self.audio_updates = lasagne.updates.adam(loss_or_grads=audio_cost, params=audio_params, learning_rate=self.LR_var)
self.audio_train_fn = theano.function([self.audio_inputs_var, self.audio_masks_var, self.audio_valid_frames_var,
self.targets_var, self.LR_var],
audio_cost, updates=self.audio_updates, name='train_fn')
#######################
### For Combined part ##
########################
if runType == 'combined':
if debug:
logger.debug("\n\n Combined Network")
RNN_features = L.get_output(self.audioNet_lout_features)
CNN_features = L.get_output(self.CNN_lout_features)
get_features = theano.function([self.CNN_input_var, self.audio_inputs_var, self.audio_masks_var,
self.audio_valid_frames_var], [RNN_features, CNN_features])
try:
RNN_feat, CNN_feat = get_features(self.images,
self.mfccs,
self.masks,
self.validAudioFrames)
logger.debug("RNN_feat.shape: %s", RNN_feat.shape)
logger.debug("CNN_feat.shape: %s", CNN_feat.shape)
except Exception as error:
print('caught this error: ' + traceback.format_exc());
import pdb;
pdb.set_trace()
# For training, use nondeterministic output
combined_network_output = L.get_output(self.combined_lout, deterministic=False)
# cross-entropy loss
combined_loss = LO.categorical_crossentropy(combined_network_output, self.targets_var.flatten())
combined_loss = combined_loss.mean()
# weight regularization
weight_decay = 1e-5
combined_weightsl2 = lasagne.regularization.regularize_network_params(self.combined_lout, lasagne.regularization.l2)
combined_loss += weight_decay * combined_weightsl2
# set all params to trainable
combined_params = L.get_all_params(self.combined_lout, trainable=True)
# remove subnet parameters so they are kept fixed (already pretrained)
if not allowSubnetTraining:
combined_params = list(set(combined_params) - set(L.get_all_params(self.CNN_lout, trainable=True)))
combined_params = list(set(combined_params) - set(L.get_all_params(self.audioNet_lout, trainable=True)))
combined_updates = lasagne.updates.adam(loss_or_grads=combined_loss, params=combined_params, learning_rate=self.LR_var)
self.combined_train_fn = theano.function([self.CNN_input_var,self.audio_inputs_var, self.audio_masks_var,
self.audio_valid_frames_var,
self.targets_var, self.LR_var], combined_loss, updates=combined_updates)
# for validation: disable dropout etc layers -> deterministic
combined_test_network_output = L.get_output(self.combined_lout, deterministic=True)
combined_preds = T.argmax(combined_test_network_output, axis=1)
combined_test_acc = T.mean(T.eq(combined_preds, self.targets_var.flatten()),
dtype=theano.config.floatX)
combined_test_loss = LO.categorical_crossentropy(combined_test_network_output, self.targets_var.flatten());
combined_test_loss = combined_test_loss.mean()
self.combined_output_fn = theano.function(
[self.CNN_input_var, self.audio_inputs_var, self.audio_masks_var, self.audio_valid_frames_var],
combined_test_network_output)
combined_top3_acc = T.mean(lasagne.objectives.categorical_accuracy(combined_test_network_output,
self.targets_var.flatten(), top_k=k))
self.combined_top3acc_fn = theano.function([self.CNN_input_var, self.audio_inputs_var, self.audio_masks_var,
self.audio_valid_frames_var,
self.targets_var], combined_top3_acc)
self.combined_val_fn = theano.function([self.CNN_input_var, self.audio_inputs_var, self.audio_masks_var,
self.audio_valid_frames_var,
self.targets_var], [combined_test_loss, combined_test_acc, combined_top3_acc])
self.combined_val_preds_fn = theano.function([self.CNN_input_var, self.audio_inputs_var, self.audio_masks_var,
self.audio_valid_frames_var,
self.targets_var],
[combined_test_loss, combined_test_acc, combined_top3_acc, combined_preds])
if debug:
try:
comb_test_loss, comb_test_acc, comb_top3_acc = self.combined_val_fn(self.images,
self.mfccs,
self.masks,
self.validAudioFrames,
self.validLabels)
logger.debug("Combined network: \ntest loss: %s \n test acc: %s \n top3_acc: %s",
comb_test_loss, comb_test_acc * 100.0, comb_top3_acc * 100.0)
except Exception as error:
print('caught this error: ' + traceback.format_exc());
import pdb;
pdb.set_trace()
def shuffle(self, lst):
import random
c = list(zip(*lst))
random.shuffle(c)
shuffled = zip(*c)
for i in range(len(shuffled)):
shuffled[i] = list(shuffled[i])
return shuffled
    # This function trains the model for one full epoch (over the whole dataset)
def train_epoch(self, runType, images, mfccs, validLabels, valid_frames, LR, batch_size=-1, dataLength=-1):
if batch_size == -1: batch_size = self.batch_size # always 1
cost = 0;
nb_batches = len(mfccs) / batch_size
if "volunteers" in self.test_dataset:
loops = range(nb_batches)
else: loops = tqdm(range(nb_batches), total=nb_batches)
for i in loops:
# optimization if same images are reused for multiple audio files (eg if noise is added)
if dataLength == -1: batch_images = images[i * batch_size:(i + 1) * batch_size][0]
else:
try:
if (i+1) * batch_size >= dataLength:
# get till end, then wrap back to first images
batch_images = images[i * batch_size % dataLength:] + images[:(i+1) * batch_size % dataLength]
batch_images = batch_images[0]
else: batch_images = images[i * batch_size % dataLength:(i + 1) * batch_size % dataLength][0]
except: import pdb;pdb.set_trace()
batch_mfccs = mfccs[i * batch_size:(i + 1) * batch_size]
batch_validLabels = validLabels[i * batch_size:(i + 1) * batch_size]
batch_valid_frames = valid_frames[i * batch_size:(i + 1) * batch_size]
batch_masks = generate_masks(batch_mfccs, valid_frames=batch_valid_frames, batch_size=batch_size)
# now pad inputs and target to maxLen
batch_mfccs = pad_sequences_X(batch_mfccs)
batch_valid_frames = pad_sequences_y(batch_valid_frames)
batch_validLabels = pad_sequences_y(batch_validLabels)
# print("batch_mfccs.shape: ", batch_mfccs.shape)
# print("batch_validLabels.shape: ", batch_validLabels.shape)
if runType == 'audio':
cst = self.audio_train_fn(batch_mfccs, batch_masks, batch_valid_frames,
batch_validLabels, LR) # training
elif runType == 'lipreading':
cst = self.lipreading_train_fn(batch_images, batch_validLabels, LR)
else: # train combined
cst = self.combined_train_fn(batch_images, batch_mfccs, batch_masks, batch_valid_frames,
batch_validLabels, LR)
cost += cst;
return cost, nb_batches
    # This function evaluates the model on the whole dataset (one full pass, no parameter updates)
def val_epoch(self, runType, images, mfccs, validLabels, valid_frames, batch_size=-1, dataLength=-1):
if batch_size == -1: batch_size = self.batch_size
cost = 0;
accuracy = 0
top3_accuracy = 0
nb_batches = len(mfccs) / batch_size
if "volunteers" in self.test_dataset: loops = range(nb_batches)
else: loops = tqdm(range(nb_batches), total=nb_batches)
for i in loops:
if dataLength == -1: batch_images = images[i * batch_size:(i + 1) * batch_size][0]
else:
try: batch_images = images[i * batch_size % dataLength:(i + 1) * batch_size % dataLength][0] #optimization if same images are reused for multiple audio files (eg if noise is added)
except: import pdb;pdb.set_trace()
batch_mfccs = mfccs[i * batch_size:(i + 1) * batch_size]
batch_validLabels = validLabels[i * batch_size:(i + 1) * batch_size]
batch_valid_frames = valid_frames[i * batch_size:(i + 1) * batch_size]
batch_masks = generate_masks(batch_mfccs, valid_frames=batch_valid_frames, batch_size=batch_size)
# now pad inputs and target to maxLen
batch_mfccs = pad_sequences_X(batch_mfccs)
batch_valid_frames = pad_sequences_y(batch_valid_frames)
batch_validLabels = pad_sequences_y(batch_validLabels)
# print("batch_mfccs.shape: ", batch_mfccs.shape)
# print("batch_validLabels.shape: ", batch_validLabels.shape)
# import pdb; pdb.set_trace()
if runType == 'audio':
cst, acc, top3_acc = self.audio_val_fn(batch_mfccs, batch_masks, batch_valid_frames,
batch_validLabels) # training
elif runType == 'lipreading':
cst, acc, top3_acc = self.lipreading_val_fn(batch_images, batch_validLabels)
else: # train combined
cst, acc, top3_acc = self.combined_val_fn(batch_images, batch_mfccs, batch_masks, batch_valid_frames,
batch_validLabels)
cost += cst;
accuracy += acc
top3_accuracy += top3_acc
return cost, accuracy, top3_accuracy, nb_batches
    # Same as val_epoch, but also returns the per-batch predictions
def val_epoch_withPreds(self, runType, images, mfccs, validLabels, valid_frames, batch_size=-1):
if batch_size == -1: batch_size = self.batch_size
cost = 0;
accuracy = 0
top3_accuracy = 0
nb_batches = len(mfccs) / batch_size
predictions = []
if "volunteers" in self.test_dataset:
loops = range(nb_batches)
else:
loops = tqdm(range(nb_batches), total=nb_batches)
for i in loops:
batch_images = images[i * batch_size:(i + 1) * batch_size][0]
batch_mfccs = mfccs[i * batch_size:(i + 1) * batch_size]
batch_validLabels = validLabels[i * batch_size:(i + 1) * batch_size]
batch_valid_frames = valid_frames[i * batch_size:(i + 1) * batch_size]
batch_masks = generate_masks(batch_mfccs, valid_frames=batch_valid_frames, batch_size=batch_size)
# now pad inputs and target to maxLen
batch_mfccs = pad_sequences_X(batch_mfccs)
batch_valid_frames = pad_sequences_y(batch_valid_frames)
batch_validLabels = pad_sequences_y(batch_validLabels)
# print("batch_mfccs.shape: ", batch_mfccs.shape)
# print("batch_validLabels.shape: ", batch_validLabels.shape)
# import pdb; pdb.set_trace()
if runType == 'audio':
cst, acc, top3_acc, preds = self.audio_val_preds_fn(batch_mfccs, batch_masks, batch_valid_frames,
batch_validLabels) # training
elif runType == 'lipreading':
cst, acc, top3_acc, preds = self.lipreading_val_preds_fn(batch_images, batch_validLabels)
else: # train combined
cst, acc, top3_acc, preds = self.combined_val_preds_fn(batch_images, batch_mfccs, batch_masks,
batch_valid_frames,
batch_validLabels)
cost += cst;
accuracy += acc
top3_accuracy += top3_acc
predictions.append(list(preds))
return cost, accuracy, top3_accuracy, nb_batches, predictions
    # evaluate many TRAINING speaker files -> train loss, val loss and val error. Load them one by one (so they fit in memory)
def evalTRAINING(self, trainingSpeakerFiles, LR, runType='audio', shuffleEnabled=True, sourceDataDir=None,
storeProcessed=False, processedDir=None,
withNoise=False, noiseType='white', ratio_dB=-3,
verbose=False, logger=logger_combinedtools):
train_cost = 0;
val_acc = 0;
val_cost = 0;
val_topk_acc = 0;
nb_train_batches = 0;
nb_val_batches = 0;
# for each speaker, pass over the train set, then val set. (test is other files). save the results.
for speakerFile in tqdm(trainingSpeakerFiles, total=len(trainingSpeakerFiles)):
if verbose: logger.debug("processing %s", speakerFile)
train, val, test = preprocessingCombined.getOneSpeaker(
speakerFile=speakerFile, sourceDataDir=sourceDataDir,
trainFraction=0.8, validFraction=0.2,
storeProcessed=storeProcessed, processedDir=processedDir, logger=logger,
withNoise=withNoise, noiseType=noiseType, ratio_dB=ratio_dB)
if shuffleEnabled: train = self.shuffle(train)
images_train, mfccs_train, audioLabels_train, validLabels_train, validAudioFrames_train = train
images_val, mfccs_val, audioLabels_val, validLabels_val, validAudioFrames_val = val
images_test, mfccs_test, audioLabels_test, validLabels_test, validAudioFrames_test = test
if verbose:
logger.debug("the number of training examples is: %s", len(images_train))
logger.debug("the number of valid examples is: %s", len(images_val))
logger.debug("the number of test examples is: %s", len(images_test))
train_cost_one, train_batches_one = self.train_epoch(runType=runType,
images=images_train,
mfccs=mfccs_train,
validLabels=validLabels_train,
valid_frames=validAudioFrames_train,
LR=LR)
train_cost += train_cost_one;
nb_train_batches += train_batches_one
# get results for validation set
val_cost_one, val_acc_one, val_topk_acc_one, val_batches_one = self.val_epoch(runType=runType,
images=images_val,
mfccs=mfccs_val,
validLabels=validLabels_val,
valid_frames=validAudioFrames_val)
val_cost += val_cost_one;
val_acc += val_acc_one;
val_topk_acc += val_topk_acc_one
nb_val_batches += val_batches_one;
if verbose:
logger.debug(" this speaker results: ")
logger.debug("\ttraining cost: %s", train_cost_one / train_batches_one)
logger.debug("\tvalidation cost: %s", val_cost_one / val_batches_one)
                logger.debug("\tvalidation acc rate: %s %%", val_acc_one / val_batches_one * 100)
                logger.debug("\tvalidation top 3 acc rate: %s %%", val_topk_acc_one / val_batches_one * 100)
# get the average over all speakers
train_cost /= nb_train_batches
val_cost /= nb_val_batches
val_acc = val_acc / nb_val_batches * 100 # convert to %
val_topk_acc = val_topk_acc / nb_val_batches * 100 # convert to %
return train_cost, val_cost, val_acc, val_topk_acc
def evalTEST(self, testSpeakerFiles, runType='audio', sourceDataDir=None, storeProcessed=False, processedDir=None,
withNoise=False, noiseType='white', ratio_dB=-3,
verbose=False, logger=logger_combinedtools):
test_acc = 0;
test_cost = 0;
test_topk_acc = 0;
nb_test_batches = 0;
# for each speaker, pass over the train set, then test set. (test is other files). save the results.
for speakerFile in tqdm(testSpeakerFiles, total=len(testSpeakerFiles)):
if verbose: logger.debug("processing %s", speakerFile)
train, val, test = preprocessingCombined.getOneSpeaker(
speakerFile=speakerFile, sourceDataDir=sourceDataDir,
trainFraction=0.0, validFraction=0.0,
storeProcessed=storeProcessed, processedDir=processedDir, logger=logger,
                withNoise=withNoise, noiseType=noiseType, ratio_dB=ratio_dB)
images_train, mfccs_train, audioLabels_train, validLabels_train, validAudioFrames_train = train
images_val, mfccs_val, audioLabels_val, validLabels_val, validAudioFrames_val = val
images_test, mfccs_test, audioLabels_test, validLabels_test, validAudioFrames_test = test
if verbose:
logger.debug("the number of training examples is: %s", len(images_train))
logger.debug("the number of valid examples is: %s", len(images_val))
logger.debug("the number of test examples is: %s", len(images_test))
            # import pdb;pdb.set_trace()  # leftover debug breakpoint, disabled so evaluation can run unattended
            # get results for the test set
test_cost_one, test_acc_one, test_topk_acc_one, test_batches_one = self.val_epoch(runType=runType,
images=images_test,
mfccs=mfccs_test,
validLabels=validLabels_test,
valid_frames=validAudioFrames_test)
test_acc += test_acc_one;
test_cost += test_cost_one;
test_topk_acc += test_topk_acc_one
nb_test_batches += test_batches_one;
if verbose:
logger.debug(" this speaker results: ")
logger.debug("\ttest cost: %s", test_cost_one / test_batches_one)
                logger.debug("\ttest acc rate: %s %%", test_acc_one / test_batches_one * 100)
                logger.debug("\ttest top 3 acc rate: %s %%", test_topk_acc_one / test_batches_one * 100)
# get the average over all speakers
test_cost /= nb_test_batches
test_acc = test_acc / nb_test_batches * 100
test_topk_acc = test_topk_acc / nb_test_batches * 100
return test_cost, test_acc, test_topk_acc
def train(self, datasetFiles, database_binaryDir, runType='combined', storeProcessed=False, processedDir=None,
save_name='Best_model', datasetName='TCDTIMIT', nbPhonemes=39, viseme=False,
num_epochs=40, batch_size=1, LR_start=1e-4, LR_decay=1,
justTest=False, withNoise=False, addNoisyAudio=False, noiseType = 'white', ratio_dB = -3,
shuffleEnabled=True, compute_confusion=False, debug=False, logger=logger_combinedtools):
trainingSpeakerFiles, testSpeakerFiles = datasetFiles
logger.info("\n* Starting training...")
best_val_acc, test_acc = self.loadPreviousResults(save_name)
logger.info("Initial best Val acc: %s", best_val_acc)
logger.info("Initial best test acc: %s\n", test_acc)
# init some performance keepers
best_epoch = 1
LR = LR_start
self.epochsNotImproved = 0
if not self.loadPerSpeaker: #load all the lipspeakers in memory, then don't touch the files -> no reloading needed = faster training
trainPath = os.path.expanduser("~/TCDTIMIT/combinedSR/TCDTIMIT/binaryLipspeakers/allLipspeakersTrain.pkl")
valPath = os.path.expanduser("~/TCDTIMIT/combinedSR/TCDTIMIT/binaryLipspeakers/allLipspeakersVal.pkl")
testPath = os.path.expanduser("~/TCDTIMIT/combinedSR/TCDTIMIT/binaryLipspeakers/allLipspeakersTest.pkl")
if viseme:
trainPath = trainPath.replace(".pkl","_viseme.pkl")
valPath = valPath.replace(".pkl", "_viseme.pkl")
testPath = testPath.replace(".pkl", "_viseme.pkl")
allImages_train, allMfccs_train, allAudioLabels_train, allValidLabels_train, allValidAudioFrames_train = unpickle(trainPath)
allImages_val, allMfccs_val, allAudioLabels_val, allValidLabels_val, allValidAudioFrames_val = unpickle(valPath)
allImages_test, allMfccs_test, allAudioLabels_test, allValidLabels_test, allValidAudioFrames_test = unpickle(testPath)
# if you wish to train with noise, you need to replace the audio data with noisy audio from audioSR/firDataset/audioToPkl_perVideo.py,
# like so (but also for train and val)
            if withNoise and not addNoisyAudio:  # TODO ugly hack, but we still want performance results on clean audio when training with noisy data added
# replace clean audio by noisy audio
testPath = os.path.expanduser(
"~/TCDTIMIT/combinedSR/") + datasetName + "/binaryLipspeakers" + os.sep \
+ 'allLipspeakersTest' + "_" + noiseType + "_" + "ratio" + str(ratio_dB) + '.pkl'
allMfccs_test, allAudioLabels_test, allValidLabels_test, allValidAudioFrames_test = unpickle(testPath)
nbTrainVideos = len(allImages_train)
if addNoisyAudio: # don't overwrite the clean audio but add noisy data to TRAIN set: same images but with noisy audio
if type(noiseType)!=list: noiseType = [noiseType]
for noiseTypeVal in noiseType:
logger_combinedtools.info(" appending %s audio data...", noiseTypeVal)
if type(ratio_dB) != list: ratio_dB = [ratio_dB]
for ratio in ratio_dB:
logger_combinedtools.info(" of lvl %s", ratio)
trainPath = os.path.expanduser(
"~/TCDTIMIT/combinedSR/") + datasetName + "/binaryLipspeakers" + os.sep \
+ 'allLipspeakersTrain' + "_" + noiseTypeVal + "_" + "ratio" + str(ratio) + '.pkl'
allMfccs_trainAdd, allAudioLabels_trainAdd, allValidLabels_trainAdd, allValidAudioFrames_trainAdd = unpickle(
trainPath)
allMfccs_train += allMfccs_trainAdd
allAudioLabels_train += allAudioLabels_trainAdd
allValidLabels_train += allValidLabels_trainAdd
allValidAudioFrames_train += allValidAudioFrames_trainAdd
logger_combinedtools.debug("data loading complete, evaluating...")
dataLength = nbTrainVideos; i = 788; images = allImages_train
try:
if (i + 1) * batch_size >= dataLength:
# get till end, then wrap back to first images
batch_images = images[i * batch_size % dataLength:] + images[:(i + 1) * batch_size % dataLength]
else:
batch_images = images[i * batch_size % dataLength:(i + 1) * batch_size % dataLength][0]
except:
import pdb;pdb.set_trace()
test_cost, test_acc, test_topk_acc, nb_test_batches = self.val_epoch(runType=runType,
images=allImages_test,
mfccs=allMfccs_test,
validLabels=allValidLabels_test,
valid_frames=allValidAudioFrames_test,
batch_size=1)
test_cost /= nb_test_batches
test_acc = test_acc / nb_test_batches * 100
test_topk_acc = test_topk_acc / nb_test_batches * 100
else:
test_cost, test_acc, test_topk_acc = self.evalTEST(testSpeakerFiles,
runType=runType,
sourceDataDir=database_binaryDir,
storeProcessed=storeProcessed,
processedDir=processedDir,
withNoise=withNoise, noiseType=noiseType, ratio_dB=ratio_dB)
# # TODO: end remove
logger.info("TEST results: ")
logger.info("\t test cost: %s", test_cost)
logger.info("\t test acc rate: %s %%", test_acc)
logger.info("\t test top 3 acc: %s %%", test_topk_acc)
if justTest: return
        logger.info("starting training for %s epochs...", num_epochs)
        finalTestResults = None  # make sure this exists even if training finishes without triggering the early-stopping branch below
        # now run through the epochs
for epoch in range(num_epochs):
logger.info("\n\n\n Epoch %s started", epoch + 1)
start_time = time.time()
if self.loadPerSpeaker:
train_cost, val_cost, val_acc, val_topk_acc = self.evalTRAINING(trainingSpeakerFiles, LR=LR,
runType=runType,
shuffleEnabled=shuffleEnabled,
sourceDataDir=database_binaryDir,
storeProcessed=storeProcessed,
processedDir=processedDir,
withNoise=withNoise,
noiseType=noiseType, ratio_dB=ratio_dB)
else:
train_cost, nb_train_batches = self.train_epoch(runType=runType,
images=allImages_train,
mfccs=allMfccs_train,
validLabels=allValidLabels_train,
valid_frames=allValidAudioFrames_train,
LR=LR, dataLength=nbTrainVideos)
train_cost /= nb_train_batches
val_cost, val_acc, val_topk_acc, nb_val_batches = self.val_epoch(runType=runType,
images=allImages_val,
mfccs=allMfccs_val,
validLabels=allValidLabels_val,
valid_frames=allValidAudioFrames_val,
batch_size=1)
val_cost /= nb_val_batches
val_acc = val_acc / nb_val_batches * 100
val_topk_acc = val_topk_acc / nb_val_batches * 100
# test if validation acc went up
printTest = False
resetNetwork=False
if val_acc > best_val_acc:
printTest = True
                if val_acc - best_val_acc > 0.2: self.epochsNotImproved = 0  # only reset the early-stopping counter for a substantial improvement
best_val_acc = val_acc
best_epoch = epoch + 1
logger.info("\n\nBest ever validation score; evaluating TEST set...")
if self.loadPerSpeaker:
test_cost, test_acc, test_topk_acc = self.evalTEST(testSpeakerFiles, runType=runType,
sourceDataDir=database_binaryDir,
storeProcessed=storeProcessed,
processedDir=processedDir,
withNoise=withNoise, noiseType=noiseType,
ratio_dB=ratio_dB)
else:
test_cost, test_acc, test_topk_acc, nb_test_batches = self.val_epoch(runType=runType,
images=allImages_test,
mfccs=allMfccs_test,
validLabels=allValidLabels_test,
valid_frames=allValidAudioFrames_test,
batch_size=1)
test_cost /= nb_test_batches
test_acc = test_acc / nb_test_batches * 100
test_topk_acc = test_topk_acc / nb_test_batches * 100
logger.info("TEST results: ")
logger.info("\t test cost: %s", test_cost)
logger.info("\t test acc rate: %s %%", test_acc)
logger.info("\t test top 3 acc: %s %%", test_topk_acc)
self.best_cost = val_cost
self.best_epoch = self.curr_epoch
# get the parameters of the model we're training
if runType == 'audio': lout = self.audioNet_lout
elif runType == 'lipreading': lout = self.lipreading_lout
elif runType == 'combined': lout = self.combined_lout
else: raise IOError("can't save network params; network output not found")
self.best_param = L.get_all_param_values(lout)
logger.info("New best model found!")
if save_name is not None:
logger.info("Model saved as " + save_name)
self.save_model(save_name)
# save top scores
self.network_train_info['final_test_cost'] = test_cost
self.network_train_info['final_test_acc'] = test_acc
self.network_train_info['final_test_top3_acc'] = test_topk_acc
else: #reset to best model we had
resetNetwork= True
epoch_duration = time.time() - start_time
# Then we logger.info the results for this epoch:
logger.info("Epoch %s of %s took %s seconds", epoch + 1, num_epochs, epoch_duration)
logger.info(" LR: %s", LR)
logger.info(" training cost: %s", train_cost)
logger.info(" validation cost: %s", val_cost)
logger.info(" validation acc rate: %s %%", val_acc)
logger.info(" validation top 3 acc rate: %s %%", val_topk_acc)
logger.info(" best epoch: %s", best_epoch)
logger.info(" best validation acc rate: %s %%", best_val_acc)
if printTest:
logger.info(" test cost: %s", test_cost)
logger.info(" test acc rate: %s %%", test_acc)
logger.info(" test top 3 acc rate: %s %%", test_topk_acc)
# save the training info
self.network_train_info['train_cost'].append(train_cost)
self.network_train_info['val_cost'].append(val_cost)
self.network_train_info['val_acc'].append(val_acc)
self.network_train_info['val_topk_acc'].append(val_topk_acc)
self.network_train_info['test_cost'].append(test_cost)
self.network_train_info['test_acc'].append(test_acc)
self.network_train_info['test_topk_acc'].append(test_topk_acc)
nb_params = self.getParamsInfo()
self.network_train_info['nb_params'] = nb_params
store_path = save_name + '_trainInfo.pkl'
saveToPkl(store_path, self.network_train_info)
logger.info("Train info written to:\t %s", store_path)
# decay the LR
# LR *= LR_decay
LR = self.updateLR(LR, LR_decay)
if resetNetwork: self.setNetworkParams(runType)
if self.epochsNotImproved > 3:
logger.warning("\n\n NO MORE IMPROVEMENTS -> stop training")
finalTestResults = self.finalNetworkEvaluation(save_name=save_name,
database_binaryDir=database_binaryDir,
processedDir=processedDir,
runType=runType,
storeProcessed=storeProcessed,
testSpeakerFiles=testSpeakerFiles,
withNoise=withNoise, noiseType=noiseType, ratio_dB=ratio_dB)
break
logger.info("Done.")
return finalTestResults
def loadPreviousResults(self, save_name, logger=logger_combinedtools):
# try to load performance metrics of stored model
best_val_acc = 0
test_topk_acc = 0
test_cost = 0
test_acc = 0
try:
if os.path.exists(save_name + ".npz") and os.path.exists(save_name + "_trainInfo.pkl"):
old_train_info = unpickle(save_name + '_trainInfo.pkl')
if type(old_train_info) == dict: # normal case
best_val_acc = max(old_train_info['val_acc'])
test_cost = min(old_train_info['test_cost'])
test_acc = max(old_train_info['test_acc'])
test_topk_acc = max(old_train_info['test_topk_acc'])
self.network_train_info = old_train_info #load old train info so it won't get lost on retrain
if not 'final_test_cost' in self.network_train_info.keys():
self.network_train_info['final_test_cost'] = min(self.network_train_info['test_cost'])
if not 'final_test_acc' in self.network_train_info.keys():
self.network_train_info['final_test_acc'] = max(self.network_train_info['test_acc'])
if not 'final_test_top3_acc' in self.network_train_info.keys():
self.network_train_info['final_test_top3_acc'] = max(self.network_train_info['test_topk_acc'])
else:
logger.warning("old trainInfo found, but wrong format: %s", save_name + "_trainInfo.pkl")
# do nothing
else:
return -1,-1
except:
logger.warning("No old trainInfo found...")
pass
return best_val_acc, test_acc
# evaluate network on test set.
# Combined network -> evaluate audio, lipreading and then combined network
# Audio network -> evaluate audio
# Lipreading -> evaluate lipreading
def finalNetworkEvaluation(self, save_name, database_binaryDir, processedDir, runType, testSpeakerFiles,
withNoise=False, noiseType='white', ratio_dB=-3, datasetName='TCDTIMIT', roundParams=False,
storeProcessed=False, nbPhonemes=39, viseme=False, withPreds=False, logger=logger_combinedtools):
if "volunteers" in self.test_dataset :
loadPerSpeaker = True
else: loadPerSpeaker = self.loadPerSpeaker #load default value
print(loadPerSpeaker)
# else, load data that is given (True -> volunteers, False -> lipspeakers)
if viseme: nbPhonemes = 12
# print what kind of network we're running
if runType == 'lipreading': networkType = "lipreading " + self.lipreadingType
else: networkType = runType
logger.info(" \n\n Running FINAL evaluation on Test set... (%s network type)", networkType)
# get the data to test
store_path = save_name + '_trainInfo.pkl' #dictionary with lists that contain training info for each epoch (train/val/test accuracy, cost etc)
self.network_train_info = unpickle(store_path)
# for the lipspeaker files that are all loaded in memory at once, we still need to get the data
if not loadPerSpeaker: # load all the lipspeakers in memory, then don't touch the files -> no reloading needed = faster
testPath = os.path.expanduser("~/TCDTIMIT/combinedSR/TCDTIMIT/binaryLipspeakers/allLipspeakersTest.pkl")
if viseme:
testPath = testPath.replace(".pkl", "_viseme.pkl")
allImages_test, allMfccs_test, allAudioLabels_test, allValidLabels_test, allValidAudioFrames_test = unpickle(
testPath)
if withNoise:
testPath= os.path.expanduser("~/TCDTIMIT/combinedSR/") + datasetName + "/binaryLipspeakers" + os.sep \
+ 'allLipspeakersTest' + "_" + noiseType + "_" + "ratio" + str(ratio_dB) + '.pkl'
allMfccs_test, allAudioLabels_test, allValidLabels_test, allValidAudioFrames_test = unpickle(testPath)
if loadPerSpeaker:
test_cost, test_acc, test_topk_acc = self.evalTEST(testSpeakerFiles, runType=runType,
sourceDataDir=database_binaryDir,
storeProcessed=storeProcessed,
processedDir=processedDir,
withNoise=withNoise, noiseType=noiseType,
ratio_dB=ratio_dB)
else:
if withPreds:
test_cost, test_acc, test_topk_acc, nb_test_batches, predictions = self.val_epoch_withPreds(runType=runType,
images=allImages_test,
mfccs=allMfccs_test,
validLabels=allValidLabels_test,
valid_frames=allValidAudioFrames_test,
batch_size=1)
confMatrix = self.getConfusionMatrix(allValidLabels_test, predictions, nbPhonemes)
saveToPkl(save_name + "_confusionMatrix.pkl", confMatrix)
logger.info("saved confusion matrix to: %s_confusionMatrix.pkl", save_name)
else:
test_cost, test_acc, test_topk_acc, nb_test_batches = self.val_epoch(runType=runType,
images=allImages_test,
mfccs=allMfccs_test,
validLabels=allValidLabels_test,
valid_frames=allValidAudioFrames_test,
batch_size=1)
test_cost /= nb_test_batches
test_acc = test_acc / nb_test_batches * 100
test_topk_acc = test_topk_acc / nb_test_batches * 100
logger.info("FINAL TEST results on %s: ", runType)
if roundParams: logger.info("ROUND_PARAMS")
logger.info("\t %s test cost: %s", runType, test_cost)
logger.info("\t %s test acc rate: %s %%", runType, test_acc)
logger.info("\t %s test top 3 acc: %s %%", runType, test_topk_acc)
if self.test_dataset != self.dataset:
testType = "_" + self.test_dataset
else:
testType = ""
if roundParams:
testType = "_roundParams" + testType
if runType != 'lipreading' and withNoise:
print(noiseType + "_" + "ratio" + str(ratio_dB) + testType)
self.network_train_info[
'final_test_cost_' + noiseType + "_" + "ratio" + str(ratio_dB) + testType] = test_cost
self.network_train_info['final_test_acc_' + noiseType + "_" + "ratio" + str(ratio_dB) + testType] = test_acc
self.network_train_info[
'final_test_top3_acc_' + noiseType + "_" + "ratio" + str(ratio_dB) + testType] = test_topk_acc
else:
self.network_train_info['final_test_cost' + testType] = test_cost
self.network_train_info['final_test_acc' + testType] = test_acc
self.network_train_info['final_test_top3_acc' + testType] = test_topk_acc
nb_params = self.getParamsInfo()
self.network_train_info['nb_params'] = nb_params
saveToPkl(store_path, self.network_train_info)
return test_cost, test_acc, test_topk_acc
def getConfusionMatrix(self,y_test, maxprob, nbClasses):
import theano
from theano import tensor as T
x = T.ivector('x')
classes = T.scalar('n_classes')
onehot = T.eq(x.dimshuffle(0, 'x'), T.arange(classes).dimshuffle('x', 0))
oneHot = theano.function([x, classes], onehot)
examples = T.scalar('n_examples')
y = T.imatrix('y')
y_pred = T.imatrix('y_pred')
confMat = T.dot(y.T, y_pred) / examples
confusionMatrix = theano.function(inputs=[y, y_pred, examples], outputs=confMat)
def confusion_matrix(targets, preds, n_class):
try:assert len(targets) >= len(preds)
except: import pdb;pdb.set_trace()
targets = targets[:len(preds)]
targetsFlat = []; predsFlat = []
for i in range(len(targets)):
targetsFlat += list(targets[i])
predsFlat += list(preds[i])
return confusionMatrix(oneHot(targetsFlat, n_class), oneHot(predsFlat, n_class), len(targetsFlat))
return confusion_matrix(y_test, maxprob, nbClasses)
def updateLR(self, LR, LR_decay, logger=logger_combinedtools):
this_acc = self.network_train_info['val_acc'][-1]
this_cost = self.network_train_info['val_cost'][-1]
try:
last_acc = self.network_train_info['val_acc'][-2]
last_cost = self.network_train_info['val_cost'][-2]
except:
last_acc = -10
last_cost = 10 * this_cost # first time it will fail because there is only 1 result stored
        # only reduce LR if there is not much improvement anymore
if this_cost / float(last_cost) >= 0.98 or this_acc-last_acc < 0.2:
logger.info(" Error not much reduced: %s vs %s. Reducing LR: %s", this_cost, last_cost, LR * LR_decay)
self.epochsNotImproved += 1
return LR * LR_decay
else:
self.epochsNotImproved = max(self.epochsNotImproved - 1, 0) # reduce by 1, minimum 0
return LR
| mit | 3,380,043,371,187,710,500 | 51.543954 | 200 | 0.537702 | false |
imiolek-ireneusz/pysiogame | game_boards/game010.py | 1 | 11752 | # -*- coding: utf-8 -*-
import pygame
import random
import sys
from math import sqrt
import classes.board
import classes.extras as ex
import classes.game_driver as gd
import classes.level_controller as lc
import classes.simple_vector as sv
class Board(gd.BoardGame):
def __init__(self, mainloop, speaker, config, screen_w, screen_h):
self.level = lc.Level(self, mainloop, 1, 1)
gd.BoardGame.__init__(self, mainloop, speaker, config, screen_w, screen_h, 11, 9)
self.max_size = 99
self.board.draw_grid = False
def create_game_objects(self, level=1):
self.allow_unit_animations = False
self.active_tool = 0
self.active_letter = "A"
self.active_word = "Apple"
self.word_pos_y = 0
self.var_brush = 1
s = random.randrange(30, 80)
v = random.randrange(200, 255)
h = random.randrange(0, 255)
letter_color = ex.hsv_to_rgb(h, s, v)
font_color = ex.hsv_to_rgb(h, 255, 140)
if self.mainloop.scheme is not None:
self.bg_color = self.mainloop.scheme.u_color
color = self.mainloop.scheme.u_color
else:
self.bg_color = [255, 255, 255]
color = [255, 255, 255]
llc = self.lang.alphabet_lc
luc = self.lang.alphabet_uc
l = len(llc)
if l % 2 == 0:
lh = l // 2
else:
lh = l // 2 + 1
hue_step = 255 // (lh * 2)
self.count = l * 2 + lh
data = [35, l, 0, 8]
font_size = 12
font_size2 = 14
self.brush_size = data[3]
# stretch width to fit the screen size
max_x_count = self.get_x_count(data[1], even=None)
if max_x_count > 35:
data[0] = max_x_count
self.data = data
self.vis_buttons = [0, 0, 0, 0, 1, 0, 1, 0, 0]
self.mainloop.info.hide_buttonsa(self.vis_buttons)
self.layout.update_layout(data[0], data[1])
scale = self.layout.scale
self.board.level_start(data[0], data[1], scale)
# canvas
self.board.add_unit(10, 0, data[0] - 16, data[1], classes.board.Letter, "", color, "", font_size)
self.canvas_block = self.board.ships[0]
self.canvas_block.set_outline([0, 54, 229], 1)
self.canvas_block.font3 = self.board.font_sizes[font_size2]
x = 0
y = 0
# i_chr = 65
for i in range(0, l):
self.board.add_unit(x, y, 2, 2, classes.board.Letter, luc[i], letter_color, "", 25)
self.board.add_unit(x + 4, y, 2, 2, classes.board.Letter, llc[i], letter_color, "", 25)
if i < lh:
self.board.add_unit(x + 8, y, 2, 2, classes.board.Letter, str(i), letter_color, "", 25)
y += 2
if y >= l:
x = 2
y = 0
self.board.add_door(0, 0, 2, 2, classes.board.Door, "", color, "")
self.board.add_door(data[0] - 1, 17, 1, 1, classes.board.Door, "", color, "")
tool_len = len(self.board.ships)
        # color palette
h = 0
s = 250
v = 70
# number of available color spaces minus 2 for black and white
number_of_col_per_hue = 6 # number_of_colors // number_of_hues
v_num = (255 - v) // (number_of_col_per_hue)
# greyscale
grey_num = 6 # number_of_colors+2 - number_of_hues * number_of_col_per_hue
if grey_num > 1:
grey_v_num = (255 // (grey_num - 1))
else:
grey_v_num = 0
grey_count = 0
for j in range(0, data[1]):
for i in range(data[0] - 6, data[0]):
color2 = ex.hsv_to_rgb(h, s, v)
self.board.add_unit(i, j, 1, 1, classes.board.Ship, "", color2, "", 2)
if h < 249:
if i < data[0] - 1:
v += v_num
else:
v = 70
s = 250
h += hue_step
else:
if grey_count == 0:
s = 0
v = 0
grey_count += 1
else:
v += grey_v_num
self.active_color = self.board.ships[173].initcolor
self.size_display = self.board.units[0]
self.tool_door = self.board.units[-2]
self.color_door = self.board.units[-1]
self.btn_down = False
# points
self.p_first = [0, 0]
self.p_last = [0, 0]
self.p_prev = [0, 0]
self.p_current = [0, 0]
self.outline_all(1, 1)
doors = [self.tool_door, self.color_door]
for each in doors:
each.door_outline = True
each.perm_outline_color = [255, 0, 0]
self.board.all_sprites_list.move_to_front(each)
for each in self.board.ships:
each.outline = False
each.font_color = font_color
each.immobilize()
self.canvas = pygame.Surface(
[self.canvas_block.grid_w * self.board.scale, self.canvas_block.grid_h * self.board.scale - 1])
self.canvas.fill(self.canvas_block.initcolor)
self.paint_bg_letter()
self.canvas_org = self.canvas.copy()
def handle(self, event):
gd.BoardGame.handle(self, event) # send event handling up
if event.type == pygame.MOUSEBUTTONDOWN:
# Change the x/y screen coordinates to grid coordinates
pos = event.pos
active = self.board.active_ship
if event.button == 1:
if active == 0:
self.btn_down = True
canvas_pos = [pos[0] - self.layout.game_left - 10 * self.layout.scale,
pos[1] - self.layout.top_margin]
self.p_first = canvas_pos
self.p_prev = canvas_pos
self.p_current = canvas_pos
self.brush_size = self.data[3]
self.paint_pencil(0)
pygame.mouse.set_cursor(*pygame.cursors.broken_x)
elif 0 < active < self.count + 1:
self.active_letter = self.board.ships[self.board.active_ship].value
# self.active_word = self.word_list[self.board.active_ship-1]#"Zebra"#self.active_letter
self.tool_door.set_pos(self.board.active_ship_pos)
self.paint_bg_letter()
elif active > self.count:
self.active_color = self.board.ships[active].initcolor
self.color_door.set_pos(self.board.active_ship_pos)
elif event.type == pygame.MOUSEMOTION and self.btn_down == True:
active = self.board.active_ship
pos = event.pos
column = (pos[0] - self.layout.game_left) // (self.layout.width)
row = (pos[1] - self.layout.top_margin) // (self.layout.height)
if active == 0 and self.data[0] - 6 > column > 9 and row < self.data[1]:
canvas_pos = [pos[0] - self.layout.game_left - 10 * self.layout.scale, pos[1] - self.layout.top_margin]
self.p_prev = self.p_current
self.p_current = canvas_pos
self.paint_pencil(1)
elif event.type == pygame.MOUSEBUTTONUP and event.button == 1:
active = self.board.active_ship
pos = event.pos
column = (pos[0] - self.layout.game_left) // (self.layout.width)
row = (pos[1] - self.layout.top_margin) // (self.layout.height)
if active == 0 and self.data[0] - 6 > column > 9 and row < self.data[1]:
# drop the new object onto the painting
canvas_pos = [pos[0] - self.layout.game_left - 10 * self.layout.scale, pos[1] - self.layout.top_margin]
self.p_last = canvas_pos
self.paint_pencil(2)
else:
if self.btn_down:
self.screen_restore()
self.copy_to_screen()
self.btn_down = False
def paint_bg_letter(self):
if sys.version_info < (3, 0):
txt = unicode(self.active_letter, "utf-8")
else:
txt = self.active_letter
try:
text = self.canvas_block.font.render("%s" % (txt), 1, (220, 220, 220, 0))
font_x = ((self.board.scale * self.canvas_block.grid_w - self.canvas_block.font.size(txt)[0]) // 2)
font_y = ((self.board.scale * self.canvas_block.grid_h - self.canvas_block.font.size(txt)[
1]) // 2) - 3 * self.board.scale
self.canvas.fill(self.bg_color)
self.canvas.blit(text, (font_x, font_y))
self.copy_to_screen()
except:
pass
# states => mouse states => 0 - mouse_btn_down, 1 - mouse_move, 2 - mouse_btn_up
def paint_pencil(self, state):
if self.brush_size > 0:
if state == 0:
self.backup_canvas()
pygame.draw.circle(self.canvas, self.active_color, self.p_current, self.brush_size // 2, 0)
self.copy_to_screen()
elif state == 1:
width = self.brush_size
if self.brush_size > 2:
if self.brush_size % 2 == 0:
r = self.brush_size // 2
width = self.brush_size + 3
else:
r = self.brush_size // 2
width = self.brush_size + 2
pygame.draw.circle(self.canvas, self.active_color, self.p_current, r, 0)
if self.brush_size > 3:
self.draw_line(self.p_prev, self.p_current, self.brush_size, self.brush_size)
else:
pygame.draw.line(self.canvas, self.active_color, self.p_prev, self.p_current, width)
self.copy_to_screen()
def draw_line(self, p1, p2, bs1, bs2):
        # find the corner points of the polygon using Thales' (intercept) theorem
        # and draw the polygon - a rotated rectangle or trapezium, plus 2 circles capping the ends of the 'line'
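        # Added clarification (not in the original code): for the segment vector
        # v = (dx, dy), the vector (dy, -dx) / |v| is a unit perpendicular;
        # scaling it by half the brush size and shifting each endpoint by +/-
        # that offset gives the four corner points computed below.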
v = sv.Vector2.from_points(p1, p2)
if v[0] != 0 or v[1] != 0:
bs1 = bs1 // 2
bs2 = bs2 // 2
# vector length
v_len = sqrt(v[0] * v[0] + v[1] * v[1])
x1 = v[1] * bs1 / v_len
y1 = v[0] * bs1 / v_len
if bs1 != bs2:
x2 = v[1] * bs2 / v_len
y2 = v[0] * bs2 / v_len
else:
x2 = x1
y2 = y1
points = []
points.append([int(p1[0] - x1), int(p1[1] + y1)])
points.append([int(p1[0] + x1), int(p1[1] - y1)])
points.append([int(p2[0] + x2), int(p2[1] - y2)])
points.append([int(p2[0] - x2), int(p2[1] + y2)])
pygame.draw.polygon(self.canvas, self.active_color, points)
pygame.draw.aalines(self.canvas, self.active_color, True, points, 1)
pygame.draw.circle(self.canvas, self.active_color, p1, bs1, 0)
pygame.draw.circle(self.canvas, self.active_color, p2, bs2, 0)
def backup_canvas(self):
self.canvas_org = self.canvas_block.painting.copy()
def copy_to_screen(self):
self.canvas_block.painting = self.canvas.copy()
self.canvas_block.update_me = True
self.mainloop.redraw_needed[0] = True
def screen_restore(self):
self.canvas = self.canvas_org.copy()
def update(self, game):
game.fill((255, 255, 255))
gd.BoardGame.update(self, game) # rest of painting done by parent
def check_result(self):
pass
| gpl-3.0 | 6,626,082,368,293,703,000 | 37.531148 | 119 | 0.509786 | false |
yandex/mastermind | src/cocaine-app/jobs/tasks/minion_cmd.py | 1 | 2826 | import logging
import time
import elliptics
from tornado.httpclient import HTTPError
from infrastructure import infrastructure
from infrastructure_cache import cache
from jobs import TaskTypes, RetryError
from task import Task
logger = logging.getLogger('mm.jobs')
class MinionCmdTask(Task):
PARAMS = ('group', 'host', 'cmd', 'params', 'minion_cmd_id')
TASK_TIMEOUT = 6000
def __init__(self, job):
super(MinionCmdTask, self).__init__(job)
self.minion_cmd = None
self.minion_cmd_id = None
self.type = TaskTypes.TYPE_MINION_CMD
@classmethod
def new(cls, job, **kwargs):
task = super(MinionCmdTask, cls).new(job, **kwargs)
task.params['task_id'] = task.id
return task
def update_status(self, processor):
try:
self.minion_cmd = processor.minions._get_command(self.minion_cmd_id)
logger.debug('Job {0}, task {1}, minion command status was updated: {2}'.format(
self.parent_job.id, self.id, self.minion_cmd))
except elliptics.Error as e:
logger.warn('Job {0}, task {1}, minion command status {2} failed to fetch '
'from metadb: {3}'.format(self.parent_job.id, self.id,
self.minion_cmd_id, e))
pass
def execute(self, processor):
try:
minion_response = processor.minions._execute_cmd(
self.host,
self.cmd,
self.params
)
except HTTPError as e:
raise RetryError(self.attempts, e)
self._set_minion_task_parameters(minion_response.values()[0])
def _set_minion_task_parameters(self, minion_cmd):
self.minion_cmd = minion_cmd
self.minion_cmd_id = self.minion_cmd['uid']
logger.info(
'Job {job_id}, task {task_id}, minions task '
'execution: {command}'.format(
job_id=self.parent_job.id,
task_id=self.id,
command=self.minion_cmd
)
)
def human_dump(self):
data = super(MinionCmdTask, self).human_dump()
data['hostname'] = cache.get_hostname_by_addr(data['host'], strict=False)
return data
def finished(self, processor):
return ((self.minion_cmd is None and
time.time() - self.start_ts > self.TASK_TIMEOUT) or
(self.minion_cmd and self.minion_cmd['progress'] == 1.0))
def failed(self, processor):
if self.minion_cmd is None:
return True
return (self.minion_cmd['exit_code'] != 0 and
self.minion_cmd.get('command_code') not in
self.params.get('success_codes', []))
def __str__(self):
return 'MinionCmdTask[id: {0}]<{1}>'.format(self.id, self.cmd)
| gpl-2.0 | 6,444,915,082,982,438,000 | 32.247059 | 92 | 0.57891 | false |
testbed/testbed | testbed/libexec/db/commands.py | 1 | 3630 | # (c) 2015 Mark Hamilton, <[email protected]>
#
# This file is part of testbed
#
# Testbed is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Testbed is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Testdb. If not, see <http://www.gnu.org/licenses/>.
"""
Provide status information useful for debugging installation and setup.
"""
import logging
import ConfigParser
LOGGER = logging.getLogger(__name__)
def default_file_fill(ddict, default_file):
""" Read default_file option for mysql configuration.
    django 1.7 and newer support configuration defined in a plain text
file.
"""
if not default_file:
return ddict
config = ConfigParser.RawConfigParser()
config.read(default_file)
for (key, value) in config.items("client"):
key = key.upper()
ddict[key] = value
return ddict
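# Illustrative sketch (not from the original module): a MySQL option file
# referenced via ``read_default_file`` might contain
#
#   [client]
#   host = localhost
#   user = tester
#   password = secret
#
# default_file_fill() upper-cases every key of the [client] section (HOST,
# USER, PASSWORD, ...) and merges the values into the database settings dict.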
def do_dbls(_):
    """ List the configured databases. """
import djconfig
print "databases"
for (db_name, db_config) in djconfig.settings.DATABASES.items():
db_options = db_config.get("OPTIONS", {})
default_file = db_options.get("read_default_file", {})
db_config = default_file_fill(db_config, default_file)
engine = db_config.get("ENGINE")
host = db_config.get("HOST", None)
user = db_config.get("USER", None)
if user and host:
hostname = "%s@%s:" % (user, host)
else:
hostname = ""
name = db_config.get("NAME", None)
print " %s: %s%s:%s" % (db_name, hostname, engine, name)
def do_dbcheck(_):
""" Check that the installation is good. """
import djconfig
if "default" in djconfig.settings.DATABASES:
print "default database found ... pass"
else:
print "default database found ... fail"
return 1
##
# It does not matter if there are any products. Mostly like this is
# called just after installation. This is just a way to confirm
# installation is correct because the model can connect to the database
# pylint: disable=W0703
try:
from testdb import models
print "load models ... pass"
except Exception:
print "load models ... fail"
return 1
# pylint: disable=W0106
try:
[item for item in models.Product.objects.all()]
print "connect to database ... pass"
except Exception:
print "connect to database ... fail"
return 1
# pylint: enable=W0106
# pylint: enable=W0703
return 0
def add_subparser(subparser):
    """ Database CLI commands. """
    ##
    # Database inspection commands:
    #
    # db list
    # db check
parser = subparser.add_parser("db", help=__doc__)
subparser = parser.add_subparsers()
parser = subparser.add_parser("list",
description="list databases",
help="List all databases.")
parser.set_defaults(func=do_dbls)
parser = subparser.add_parser("check",
description="list databases",
help="List all databases.")
parser.set_defaults(func=do_dbcheck)
return subparser
| gpl-3.0 | -94,741,906,210,379,090 | 29.25 | 75 | 0.626171 | false |
coldeasy/python-driver | cassandra/cqltypes.py | 1 | 35173 | # Copyright 2013-2017 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Representation of Cassandra data types. These classes should make it simple for
the library (and caller software) to deal with Cassandra-style Java class type
names and CQL type specifiers, and convert between them cleanly. Parameterized
types are fully supported in both flavors. Once you have the right Type object
for the type you want, you can use it to serialize, deserialize, or retrieve
the corresponding CQL or Cassandra type strings.
"""
# NOTE:
# If/when the need arises to interpret types from CQL string literals in
# different ways (for https://issues.apache.org/jira/browse/CASSANDRA-3799,
# for example), these classes would be a good place to tack on
# .from_cql_literal() and .as_cql_literal() classmethods (or whatever).
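# Illustrative usage sketch (not part of the original module); all names used
# here are defined further down in this file:
#
#   int_type = lookup_casstype('org.apache.cassandra.db.marshal.Int32Type')
#   blob = int_type.to_binary(42, protocol_version=4)   # 4-byte big-endian int
#   assert int_type.from_binary(blob, protocol_version=4) == 42
#   assert int_type.cql_parameterized_type() == 'int'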
from __future__ import absolute_import # to enable import io from stdlib
from binascii import unhexlify
import calendar
from collections import namedtuple
from decimal import Decimal
import io
import logging
import re
import socket
import time
import six
from six.moves import range
import sys
from uuid import UUID
import warnings
from cassandra.marshal import (int8_pack, int8_unpack, int16_pack, int16_unpack,
uint16_pack, uint16_unpack, uint32_pack, uint32_unpack,
int32_pack, int32_unpack, int64_pack, int64_unpack,
float_pack, float_unpack, double_pack, double_unpack,
varint_pack, varint_unpack, vints_pack, vints_unpack)
from cassandra import util
apache_cassandra_type_prefix = 'org.apache.cassandra.db.marshal.'
cassandra_empty_type = 'org.apache.cassandra.db.marshal.EmptyType'
cql_empty_type = 'empty'
log = logging.getLogger(__name__)
if six.PY3:
_number_types = frozenset((int, float))
long = int
def _name_from_hex_string(encoded_name):
bin_str = unhexlify(encoded_name)
return bin_str.decode('ascii')
else:
_number_types = frozenset((int, long, float))
_name_from_hex_string = unhexlify
def trim_if_startswith(s, prefix):
if s.startswith(prefix):
return s[len(prefix):]
return s
_casstypes = {}
_cqltypes = {}
cql_type_scanner = re.Scanner((
('frozen', None),
(r'[a-zA-Z0-9_]+', lambda s, t: t),
(r'[\s,<>]', None),
))
def cql_types_from_string(cql_type):
return cql_type_scanner.scan(cql_type)[0]
class CassandraTypeType(type):
"""
The CassandraType objects in this module will normally be used directly,
rather than through instances of those types. They can be instantiated,
of course, but the type information is what this driver mainly needs.
This metaclass registers CassandraType classes in the global
by-cassandra-typename and by-cql-typename registries, unless their class
name starts with an underscore.
"""
def __new__(metacls, name, bases, dct):
dct.setdefault('cassname', name)
cls = type.__new__(metacls, name, bases, dct)
if not name.startswith('_'):
_casstypes[name] = cls
if not cls.typename.startswith(apache_cassandra_type_prefix):
_cqltypes[cls.typename] = cls
return cls
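# Illustrative note (not from the original source): because of this metaclass,
# merely defining a subclass such as BooleanType further below registers it in
# both lookup tables, so afterwards both of these hold:
#   _casstypes['BooleanType'] is BooleanType
#   _cqltypes['boolean'] is BooleanType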
casstype_scanner = re.Scanner((
(r'[()]', lambda s, t: t),
(r'[a-zA-Z0-9_.:=>]+', lambda s, t: t),
(r'[\s,]', None),
))
def lookup_casstype_simple(casstype):
"""
Given a Cassandra type name (either fully distinguished or not), hand
back the CassandraType class responsible for it. If a name is not
recognized, a custom _UnrecognizedType subclass will be created for it.
This function does not handle complex types (so no type parameters--
nothing with parentheses). Use lookup_casstype() instead if you might need
that.
"""
shortname = trim_if_startswith(casstype, apache_cassandra_type_prefix)
try:
typeclass = _casstypes[shortname]
except KeyError:
typeclass = mkUnrecognizedType(casstype)
return typeclass
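# Illustrative sketch (not from the original source):
#   lookup_casstype_simple('LongType') and
#   lookup_casstype_simple('org.apache.cassandra.db.marshal.LongType')
# both return the LongType class, while an unrecognized name such as
# 'NoSuchType' yields a generated _UnrecognizedType subclass instead of raising.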
def parse_casstype_args(typestring):
tokens, remainder = casstype_scanner.scan(typestring)
if remainder:
raise ValueError("weird characters %r at end" % remainder)
# use a stack of (types, names) lists
args = [([], [])]
for tok in tokens:
if tok == '(':
args.append(([], []))
elif tok == ')':
types, names = args.pop()
prev_types, prev_names = args[-1]
prev_types[-1] = prev_types[-1].apply_parameters(types, names)
else:
types, names = args[-1]
parts = re.split(':|=>', tok)
tok = parts.pop()
if parts:
names.append(parts[0])
else:
names.append(None)
ctype = lookup_casstype_simple(tok)
types.append(ctype)
# return the first (outer) type, which will have all parameters applied
return args[0][0][0]
def lookup_casstype(casstype):
"""
Given a Cassandra type as a string (possibly including parameters), hand
back the CassandraType class responsible for it. If a name is not
recognized, a custom _UnrecognizedType subclass will be created for it.
Example:
>>> lookup_casstype('org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.Int32Type)')
<class 'cassandra.cqltypes.MapType(UTF8Type, Int32Type)'>
"""
if isinstance(casstype, (CassandraType, CassandraTypeType)):
return casstype
try:
return parse_casstype_args(casstype)
except (ValueError, AssertionError, IndexError) as e:
raise ValueError("Don't know how to parse type string %r: %s" % (casstype, e))
def is_reversed_casstype(data_type):
return issubclass(data_type, ReversedType)
class EmptyValue(object):
""" See _CassandraType.support_empty_values """
def __str__(self):
return "EMPTY"
__repr__ = __str__
EMPTY = EmptyValue()
@six.add_metaclass(CassandraTypeType)
class _CassandraType(object):
subtypes = ()
num_subtypes = 0
empty_binary_ok = False
support_empty_values = False
"""
Back in the Thrift days, empty strings were used for "null" values of
all types, including non-string types. For most users, an empty
string value in an int column is the same as being null/not present,
so the driver normally returns None in this case. (For string-like
types, it *will* return an empty string by default instead of None.)
To avoid this behavior, set this to :const:`True`. Instead of returning
None for empty string values, the EMPTY singleton (an instance
of EmptyValue) will be returned.
"""
def __repr__(self):
return '<%s( %r )>' % (self.cql_parameterized_type(), self.val)
@classmethod
def from_binary(cls, byts, protocol_version):
"""
Deserialize a bytestring into a value. See the deserialize() method
for more information. This method differs in that if None or the empty
string is passed in, None may be returned.
"""
if byts is None:
return None
elif len(byts) == 0 and not cls.empty_binary_ok:
return EMPTY if cls.support_empty_values else None
return cls.deserialize(byts, protocol_version)
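    # Added clarification (not in the original source): with the default
    # support_empty_values = False, a zero-length cell for a non-string type
    # such as Int32Type comes back from from_binary() as None; a subclass that
    # sets support_empty_values = True receives the EMPTY singleton instead.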
@classmethod
def to_binary(cls, val, protocol_version):
"""
Serialize a value into a bytestring. See the serialize() method for
more information. This method differs in that if None is passed in,
the result is the empty string.
"""
return b'' if val is None else cls.serialize(val, protocol_version)
@staticmethod
def deserialize(byts, protocol_version):
"""
Given a bytestring, deserialize into a value according to the protocol
for this type. Note that this does not create a new instance of this
class; it merely gives back a value that would be appropriate to go
inside an instance of this class.
"""
return byts
@staticmethod
def serialize(val, protocol_version):
"""
Given a value appropriate for this class, serialize it according to the
protocol for this type and return the corresponding bytestring.
"""
return val
@classmethod
def cass_parameterized_type_with(cls, subtypes, full=False):
"""
Return the name of this type as it would be expressed by Cassandra,
optionally fully qualified. If subtypes is not None, it is expected
to be a list of other CassandraType subclasses, and the output
string includes the Cassandra names for those subclasses as well,
as parameters to this one.
Example:
>>> LongType.cass_parameterized_type_with(())
'LongType'
>>> LongType.cass_parameterized_type_with((), full=True)
'org.apache.cassandra.db.marshal.LongType'
>>> SetType.cass_parameterized_type_with([DecimalType], full=True)
'org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.DecimalType)'
"""
cname = cls.cassname
if full and '.' not in cname:
cname = apache_cassandra_type_prefix + cname
if not subtypes:
return cname
sublist = ', '.join(styp.cass_parameterized_type(full=full) for styp in subtypes)
return '%s(%s)' % (cname, sublist)
@classmethod
def apply_parameters(cls, subtypes, names=None):
"""
Given a set of other CassandraTypes, create a new subtype of this type
using them as parameters. This is how composite types are constructed.
>>> MapType.apply_parameters([DateType, BooleanType])
<class 'cassandra.cqltypes.MapType(DateType, BooleanType)'>
`subtypes` will be a sequence of CassandraTypes. If provided, `names`
will be an equally long sequence of column names or Nones.
"""
if cls.num_subtypes != 'UNKNOWN' and len(subtypes) != cls.num_subtypes:
raise ValueError("%s types require %d subtypes (%d given)"
% (cls.typename, cls.num_subtypes, len(subtypes)))
newname = cls.cass_parameterized_type_with(subtypes)
if six.PY2 and isinstance(newname, unicode):
newname = newname.encode('utf-8')
return type(newname, (cls,), {'subtypes': subtypes, 'cassname': cls.cassname, 'fieldnames': names})
@classmethod
def cql_parameterized_type(cls):
"""
Return a CQL type specifier for this type. If this type has parameters,
they are included in standard CQL <> notation.
"""
if not cls.subtypes:
return cls.typename
return '%s<%s>' % (cls.typename, ', '.join(styp.cql_parameterized_type() for styp in cls.subtypes))
@classmethod
def cass_parameterized_type(cls, full=False):
"""
Return a Cassandra type specifier for this type. If this type has
parameters, they are included in the standard () notation.
"""
return cls.cass_parameterized_type_with(cls.subtypes, full=full)
# it's initially named with a _ to avoid registering it as a real type, but
# client programs may want to use the name still for isinstance(), etc
CassandraType = _CassandraType
class _UnrecognizedType(_CassandraType):
num_subtypes = 'UNKNOWN'
if six.PY3:
def mkUnrecognizedType(casstypename):
return CassandraTypeType(casstypename,
(_UnrecognizedType,),
{'typename': "'%s'" % casstypename})
else:
def mkUnrecognizedType(casstypename): # noqa
return CassandraTypeType(casstypename.encode('utf8'),
(_UnrecognizedType,),
{'typename': "'%s'" % casstypename})
class BytesType(_CassandraType):
typename = 'blob'
empty_binary_ok = True
@staticmethod
def serialize(val, protocol_version):
return six.binary_type(val)
class DecimalType(_CassandraType):
typename = 'decimal'
@staticmethod
def deserialize(byts, protocol_version):
scale = int32_unpack(byts[:4])
unscaled = varint_unpack(byts[4:])
return Decimal('%de%d' % (unscaled, -scale))
@staticmethod
def serialize(dec, protocol_version):
try:
sign, digits, exponent = dec.as_tuple()
except AttributeError:
try:
sign, digits, exponent = Decimal(dec).as_tuple()
except Exception:
                raise TypeError("Invalid type for Decimal value: %r" % (dec,))
unscaled = int(''.join([str(digit) for digit in digits]))
if sign:
unscaled *= -1
scale = int32_pack(-exponent)
unscaled = varint_pack(unscaled)
return scale + unscaled
class UUIDType(_CassandraType):
typename = 'uuid'
@staticmethod
def deserialize(byts, protocol_version):
return UUID(bytes=byts)
@staticmethod
def serialize(uuid, protocol_version):
try:
return uuid.bytes
except AttributeError:
raise TypeError("Got a non-UUID object for a UUID value")
class BooleanType(_CassandraType):
typename = 'boolean'
@staticmethod
def deserialize(byts, protocol_version):
return bool(int8_unpack(byts))
@staticmethod
def serialize(truth, protocol_version):
return int8_pack(truth)
class ByteType(_CassandraType):
typename = 'tinyint'
@staticmethod
def deserialize(byts, protocol_version):
return int8_unpack(byts)
@staticmethod
def serialize(byts, protocol_version):
return int8_pack(byts)
if six.PY2:
class AsciiType(_CassandraType):
typename = 'ascii'
empty_binary_ok = True
else:
class AsciiType(_CassandraType):
typename = 'ascii'
empty_binary_ok = True
@staticmethod
def deserialize(byts, protocol_version):
return byts.decode('ascii')
@staticmethod
def serialize(var, protocol_version):
try:
return var.encode('ascii')
except UnicodeDecodeError:
return var
class FloatType(_CassandraType):
typename = 'float'
@staticmethod
def deserialize(byts, protocol_version):
return float_unpack(byts)
@staticmethod
def serialize(byts, protocol_version):
return float_pack(byts)
class DoubleType(_CassandraType):
typename = 'double'
@staticmethod
def deserialize(byts, protocol_version):
return double_unpack(byts)
@staticmethod
def serialize(byts, protocol_version):
return double_pack(byts)
class LongType(_CassandraType):
typename = 'bigint'
@staticmethod
def deserialize(byts, protocol_version):
return int64_unpack(byts)
@staticmethod
def serialize(byts, protocol_version):
return int64_pack(byts)
class Int32Type(_CassandraType):
typename = 'int'
@staticmethod
def deserialize(byts, protocol_version):
return int32_unpack(byts)
@staticmethod
def serialize(byts, protocol_version):
return int32_pack(byts)
class IntegerType(_CassandraType):
typename = 'varint'
@staticmethod
def deserialize(byts, protocol_version):
return varint_unpack(byts)
@staticmethod
def serialize(byts, protocol_version):
return varint_pack(byts)
class InetAddressType(_CassandraType):
typename = 'inet'
@staticmethod
def deserialize(byts, protocol_version):
if len(byts) == 16:
return util.inet_ntop(socket.AF_INET6, byts)
else:
# util.inet_pton could also handle, but this is faster
# since we've already determined the AF
return socket.inet_ntoa(byts)
@staticmethod
def serialize(addr, protocol_version):
if ':' in addr:
return util.inet_pton(socket.AF_INET6, addr)
else:
# util.inet_pton could also handle, but this is faster
# since we've already determined the AF
return socket.inet_aton(addr)
class CounterColumnType(LongType):
typename = 'counter'
cql_timestamp_formats = (
'%Y-%m-%d %H:%M',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%dT%H:%M',
'%Y-%m-%dT%H:%M:%S',
'%Y-%m-%d'
)
_have_warned_about_timestamps = False
class DateType(_CassandraType):
typename = 'timestamp'
@staticmethod
def interpret_datestring(val):
if val[-5] in ('+', '-'):
offset = (int(val[-4:-2]) * 3600 + int(val[-2:]) * 60) * int(val[-5] + '1')
val = val[:-5]
else:
offset = -time.timezone
for tformat in cql_timestamp_formats:
try:
tval = time.strptime(val, tformat)
except ValueError:
continue
# scale seconds to millis for the raw value
return (calendar.timegm(tval) + offset) * 1e3
else:
raise ValueError("can't interpret %r as a date" % (val,))
@staticmethod
def deserialize(byts, protocol_version):
timestamp = int64_unpack(byts) / 1000.0
return util.datetime_from_timestamp(timestamp)
@staticmethod
def serialize(v, protocol_version):
try:
# v is datetime
timestamp_seconds = calendar.timegm(v.utctimetuple())
timestamp = timestamp_seconds * 1e3 + getattr(v, 'microsecond', 0) / 1e3
except AttributeError:
try:
timestamp = calendar.timegm(v.timetuple()) * 1e3
except AttributeError:
# Ints and floats are valid timestamps too
if type(v) not in _number_types:
raise TypeError('DateType arguments must be a datetime, date, or timestamp')
timestamp = v
return int64_pack(long(timestamp))
class TimestampType(DateType):
pass
class TimeUUIDType(DateType):
typename = 'timeuuid'
def my_timestamp(self):
return util.unix_time_from_uuid1(self.val)
@staticmethod
def deserialize(byts, protocol_version):
return UUID(bytes=byts)
@staticmethod
def serialize(timeuuid, protocol_version):
try:
return timeuuid.bytes
except AttributeError:
raise TypeError("Got a non-UUID object for a UUID value")
class SimpleDateType(_CassandraType):
typename = 'date'
date_format = "%Y-%m-%d"
    # Values of the 'date' type are encoded as 32-bit unsigned integers
    # representing a number of days, with the epoch (January 1st, 1970) at
    # the center of the range (2^31).
EPOCH_OFFSET_DAYS = 2 ** 31
@staticmethod
def deserialize(byts, protocol_version):
days = uint32_unpack(byts) - SimpleDateType.EPOCH_OFFSET_DAYS
return util.Date(days)
@staticmethod
def serialize(val, protocol_version):
try:
days = val.days_from_epoch
except AttributeError:
if isinstance(val, six.integer_types):
# the DB wants offset int values, but util.Date init takes days from epoch
# here we assume int values are offset, as they would appear in CQL
# short circuit to avoid subtracting just to add offset
return uint32_pack(val)
days = util.Date(val).days_from_epoch
return uint32_pack(days + SimpleDateType.EPOCH_OFFSET_DAYS)
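    # Worked example (sketch): util.Date(0) is 1970-01-01 with
    # days_from_epoch == 0, so serialize() writes uint32_pack(2 ** 31);
    # deserialize() subtracts EPOCH_OFFSET_DAYS again and returns
    # util.Date(0). Plain ints are treated as already-offset CQL values
    # and are packed unchanged.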
class ShortType(_CassandraType):
typename = 'smallint'
@staticmethod
def deserialize(byts, protocol_version):
return int16_unpack(byts)
@staticmethod
def serialize(byts, protocol_version):
return int16_pack(byts)
class TimeType(_CassandraType):
typename = 'time'
@staticmethod
def deserialize(byts, protocol_version):
return util.Time(int64_unpack(byts))
@staticmethod
def serialize(val, protocol_version):
try:
nano = val.nanosecond_time
except AttributeError:
nano = util.Time(val).nanosecond_time
return int64_pack(nano)
class DurationType(_CassandraType):
typename = 'duration'
@staticmethod
def deserialize(byts, protocol_version):
months, days, nanoseconds = vints_unpack(byts)
return util.Duration(months, days, nanoseconds)
@staticmethod
def serialize(duration, protocol_version):
try:
m, d, n = duration.months, duration.days, duration.nanoseconds
except AttributeError:
raise TypeError('DurationType arguments must be a Duration.')
return vints_pack([m, d, n])
class UTF8Type(_CassandraType):
typename = 'text'
empty_binary_ok = True
@staticmethod
def deserialize(byts, protocol_version):
return byts.decode('utf8')
@staticmethod
def serialize(ustr, protocol_version):
try:
return ustr.encode('utf-8')
except UnicodeDecodeError:
# already utf-8
return ustr
class VarcharType(UTF8Type):
typename = 'varchar'
class _ParameterizedType(_CassandraType):
num_subtypes = 'UNKNOWN'
@classmethod
def deserialize(cls, byts, protocol_version):
if not cls.subtypes:
raise NotImplementedError("can't deserialize unparameterized %s"
% cls.typename)
return cls.deserialize_safe(byts, protocol_version)
@classmethod
def serialize(cls, val, protocol_version):
if not cls.subtypes:
raise NotImplementedError("can't serialize unparameterized %s"
% cls.typename)
return cls.serialize_safe(val, protocol_version)
class _SimpleParameterizedType(_ParameterizedType):
@classmethod
def deserialize_safe(cls, byts, protocol_version):
subtype, = cls.subtypes
if protocol_version >= 3:
unpack = int32_unpack
length = 4
else:
unpack = uint16_unpack
length = 2
numelements = unpack(byts[:length])
p = length
result = []
inner_proto = max(3, protocol_version)
for _ in range(numelements):
itemlen = unpack(byts[p:p + length])
p += length
item = byts[p:p + itemlen]
p += itemlen
result.append(subtype.from_binary(item, inner_proto))
return cls.adapter(result)
@classmethod
def serialize_safe(cls, items, protocol_version):
if isinstance(items, six.string_types):
raise TypeError("Received a string for a type that expects a sequence")
subtype, = cls.subtypes
pack = int32_pack if protocol_version >= 3 else uint16_pack
buf = io.BytesIO()
buf.write(pack(len(items)))
inner_proto = max(3, protocol_version)
for item in items:
itembytes = subtype.to_binary(item, inner_proto)
buf.write(pack(len(itembytes)))
buf.write(itembytes)
return buf.getvalue()
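    # Wire-format sketch for the protocol v3+ branch above (assuming the
    # subtype's to_binary delegates to its serialize): a ListType
    # parameterized by Int32Type serializes [1, 2] as int32_pack(2)
    # (element count) followed by int32_pack(4) + int32_pack(1) and
    # int32_pack(4) + int32_pack(2), i.e. each element is length-prefixed.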
class ListType(_SimpleParameterizedType):
typename = 'list'
num_subtypes = 1
adapter = list
class SetType(_SimpleParameterizedType):
typename = 'set'
num_subtypes = 1
adapter = util.sortedset
class MapType(_ParameterizedType):
typename = 'map'
num_subtypes = 2
@classmethod
def deserialize_safe(cls, byts, protocol_version):
key_type, value_type = cls.subtypes
if protocol_version >= 3:
unpack = int32_unpack
length = 4
else:
unpack = uint16_unpack
length = 2
numelements = unpack(byts[:length])
p = length
themap = util.OrderedMapSerializedKey(key_type, protocol_version)
inner_proto = max(3, protocol_version)
for _ in range(numelements):
key_len = unpack(byts[p:p + length])
p += length
keybytes = byts[p:p + key_len]
p += key_len
val_len = unpack(byts[p:p + length])
p += length
valbytes = byts[p:p + val_len]
p += val_len
key = key_type.from_binary(keybytes, inner_proto)
val = value_type.from_binary(valbytes, inner_proto)
themap._insert_unchecked(key, keybytes, val)
return themap
@classmethod
def serialize_safe(cls, themap, protocol_version):
key_type, value_type = cls.subtypes
pack = int32_pack if protocol_version >= 3 else uint16_pack
buf = io.BytesIO()
buf.write(pack(len(themap)))
try:
items = six.iteritems(themap)
except AttributeError:
raise TypeError("Got a non-map object for a map value")
inner_proto = max(3, protocol_version)
for key, val in items:
keybytes = key_type.to_binary(key, inner_proto)
valbytes = value_type.to_binary(val, inner_proto)
buf.write(pack(len(keybytes)))
buf.write(keybytes)
buf.write(pack(len(valbytes)))
buf.write(valbytes)
return buf.getvalue()
class TupleType(_ParameterizedType):
typename = 'tuple'
@classmethod
def deserialize_safe(cls, byts, protocol_version):
proto_version = max(3, protocol_version)
p = 0
values = []
for col_type in cls.subtypes:
if p == len(byts):
break
itemlen = int32_unpack(byts[p:p + 4])
p += 4
if itemlen >= 0:
item = byts[p:p + itemlen]
p += itemlen
else:
item = None
# collections inside UDTs are always encoded with at least the
# version 3 format
values.append(col_type.from_binary(item, proto_version))
if len(values) < len(cls.subtypes):
nones = [None] * (len(cls.subtypes) - len(values))
values = values + nones
return tuple(values)
@classmethod
def serialize_safe(cls, val, protocol_version):
if len(val) > len(cls.subtypes):
raise ValueError("Expected %d items in a tuple, but got %d: %s" %
(len(cls.subtypes), len(val), val))
proto_version = max(3, protocol_version)
buf = io.BytesIO()
for item, subtype in zip(val, cls.subtypes):
if item is not None:
packed_item = subtype.to_binary(item, proto_version)
buf.write(int32_pack(len(packed_item)))
buf.write(packed_item)
else:
buf.write(int32_pack(-1))
return buf.getvalue()
@classmethod
def cql_parameterized_type(cls):
subtypes_string = ', '.join(sub.cql_parameterized_type() for sub in cls.subtypes)
return 'frozen<tuple<%s>>' % (subtypes_string,)
class UserType(TupleType):
typename = "org.apache.cassandra.db.marshal.UserType"
_cache = {}
_module = sys.modules[__name__]
@classmethod
def make_udt_class(cls, keyspace, udt_name, field_names, field_types):
assert len(field_names) == len(field_types)
if six.PY2 and isinstance(udt_name, unicode):
udt_name = udt_name.encode('utf-8')
instance = cls._cache.get((keyspace, udt_name))
if not instance or instance.fieldnames != field_names or instance.subtypes != field_types:
instance = type(udt_name, (cls,), {'subtypes': field_types,
'cassname': cls.cassname,
'typename': udt_name,
'fieldnames': field_names,
'keyspace': keyspace,
'mapped_class': None,
'tuple_type': cls._make_registered_udt_namedtuple(keyspace, udt_name, field_names)})
cls._cache[(keyspace, udt_name)] = instance
return instance
@classmethod
def evict_udt_class(cls, keyspace, udt_name):
if six.PY2 and isinstance(udt_name, unicode):
udt_name = udt_name.encode('utf-8')
try:
del cls._cache[(keyspace, udt_name)]
except KeyError:
pass
@classmethod
def apply_parameters(cls, subtypes, names):
keyspace = subtypes[0].cass_parameterized_type() # when parsed from cassandra type, the keyspace is created as an unrecognized cass type; This gets the name back
udt_name = _name_from_hex_string(subtypes[1].cassname)
field_names = tuple(_name_from_hex_string(encoded_name) for encoded_name in names[2:]) # using tuple here to match what comes into make_udt_class from other sources (for caching equality test)
return cls.make_udt_class(keyspace, udt_name, field_names, tuple(subtypes[2:]))
@classmethod
def cql_parameterized_type(cls):
return "frozen<%s>" % (cls.typename,)
@classmethod
def deserialize_safe(cls, byts, protocol_version):
values = super(UserType, cls).deserialize_safe(byts, protocol_version)
if cls.mapped_class:
return cls.mapped_class(**dict(zip(cls.fieldnames, values)))
elif cls.tuple_type:
return cls.tuple_type(*values)
else:
return tuple(values)
@classmethod
def serialize_safe(cls, val, protocol_version):
proto_version = max(3, protocol_version)
buf = io.BytesIO()
for i, (fieldname, subtype) in enumerate(zip(cls.fieldnames, cls.subtypes)):
# first treat as a tuple, else by custom type
try:
item = val[i]
except TypeError:
item = getattr(val, fieldname)
if item is not None:
packed_item = subtype.to_binary(item, proto_version)
buf.write(int32_pack(len(packed_item)))
buf.write(packed_item)
else:
buf.write(int32_pack(-1))
return buf.getvalue()
@classmethod
def _make_registered_udt_namedtuple(cls, keyspace, name, field_names):
# this is required to make the type resolvable via this module...
# required when unregistered udts are pickled for use as keys in
# util.OrderedMap
t = cls._make_udt_tuple_type(name, field_names)
if t:
qualified_name = "%s_%s" % (keyspace, name)
setattr(cls._module, qualified_name, t)
return t
@classmethod
def _make_udt_tuple_type(cls, name, field_names):
# fallback to positional named, then unnamed tuples
# for CQL identifiers that aren't valid in Python,
try:
t = namedtuple(name, field_names)
except ValueError:
try:
t = namedtuple(name, util._positional_rename_invalid_identifiers(field_names))
log.warn("could not create a namedtuple for '%s' because one or more field names are not valid Python identifiers (%s); " \
"returning positionally-named fields" % (name, field_names))
except ValueError:
t = None
log.warn("could not create a namedtuple for '%s' because the name is not a valid Python identifier; " \
"will return tuples in its place" % (name,))
return t
class CompositeType(_ParameterizedType):
typename = "org.apache.cassandra.db.marshal.CompositeType"
@classmethod
def cql_parameterized_type(cls):
"""
There is no CQL notation for Composites, so we override this.
"""
typestring = cls.cass_parameterized_type(full=True)
return "'%s'" % (typestring,)
@classmethod
def deserialize_safe(cls, byts, protocol_version):
result = []
for subtype in cls.subtypes:
if not byts:
# CompositeType can have missing elements at the end
break
element_length = uint16_unpack(byts[:2])
element = byts[2:2 + element_length]
# skip element length, element, and the EOC (one byte)
byts = byts[2 + element_length + 1:]
result.append(subtype.from_binary(element, protocol_version))
return tuple(result)
class DynamicCompositeType(_ParameterizedType):
typename = "org.apache.cassandra.db.marshal.DynamicCompositeType"
@classmethod
def cql_parameterized_type(cls):
sublist = ', '.join('%s=>%s' % (alias, typ.cass_parameterized_type(full=True)) for alias, typ in zip(cls.fieldnames, cls.subtypes))
return "'%s(%s)'" % (cls.typename, sublist)
class ColumnToCollectionType(_ParameterizedType):
"""
This class only really exists so that we can cleanly evaluate types when
Cassandra includes this. We don't actually need or want the extra
information.
"""
typename = "org.apache.cassandra.db.marshal.ColumnToCollectionType"
class ReversedType(_ParameterizedType):
typename = "org.apache.cassandra.db.marshal.ReversedType"
num_subtypes = 1
@classmethod
def deserialize_safe(cls, byts, protocol_version):
subtype, = cls.subtypes
return subtype.from_binary(byts, protocol_version)
@classmethod
def serialize_safe(cls, val, protocol_version):
subtype, = cls.subtypes
return subtype.to_binary(val, protocol_version)
class FrozenType(_ParameterizedType):
typename = "frozen"
num_subtypes = 1
@classmethod
def deserialize_safe(cls, byts, protocol_version):
subtype, = cls.subtypes
return subtype.from_binary(byts, protocol_version)
@classmethod
def serialize_safe(cls, val, protocol_version):
subtype, = cls.subtypes
return subtype.to_binary(val, protocol_version)
def is_counter_type(t):
if isinstance(t, six.string_types):
t = lookup_casstype(t)
return issubclass(t, CounterColumnType)
def cql_typename(casstypename):
"""
Translate a Cassandra-style type specifier (optionally-fully-distinguished
Java class names for data types, along with optional parameters) into a
CQL-style type specifier.
>>> cql_typename('DateType')
'timestamp'
>>> cql_typename('org.apache.cassandra.db.marshal.ListType(IntegerType)')
'list<varint>'
"""
return lookup_casstype(casstypename).cql_parameterized_type()
| apache-2.0 | 1,791,240,450,183,804,700 | 31.871963 | 201 | 0.618656 | false |
augustog/ecrab | ECrab_Web/migrations/0002_auto_20161203_1518.py | 1 | 2107 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-03 18:18
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('ECrab_Web', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ExtendedUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cont', models.TextField(max_length=300)),
('date', models.DateField()),
],
),
migrations.CreateModel(
name='Proyectos',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(max_length=300)),
('initdate', models.DateField()),
('findate', models.DateField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Recordatorio',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
('done', models.BooleanField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.DeleteModel(
name='Usuario',
),
]
| gpl-3.0 | -4,112,987,239,918,332,000 | 37.309091 | 118 | 0.560987 | false |
clones/django-coltrane | coltrane/templatetags/coltrane.py | 1 | 1800 | from django.db.models import get_model
from django import template
from django.contrib.comments.models import Comment, FreeComment
from template_utils.templatetags.generic_content import GenericContentNode
from coltrane.models import Entry, Link
register = template.Library()
class LatestFeaturedNode(GenericContentNode):
def _get_query_set(self):
return self.queryset.filter(featured__exact=True)
def do_featured_entries(parser, token):
"""
Retrieves the latest ``num`` featured entries and stores them in a
specified context variable.
Syntax::
{% get_featured_entries [num] as [varname] %}
Example::
{% get_featured_entries 5 as featured_entries %}
"""
bits = token.contents.split()
if len(bits) != 4:
raise template.TemplateSyntaxError("'%s' tag takes three arguments" % bits[0])
if bits[2] != 'as':
raise template.TemplateSyntaxError("second argument to '%s' tag must be 'as'" % bits[0])
return LatestFeaturedNode('coltrane.entry', bits[1], bits[3])
def do_featured_entry(parser, token):
"""
Retrieves the latest featured Entry and stores it in a specified
context variable.
Syntax::
{% get_featured_entry as [varname] %}
Example::
{% get_featured_entry as featured_entry %}
"""
bits = token.contents.split()
if len(bits) != 3:
raise template.TemplateSyntaxError("'%s' tag takes two arguments" % bits[0])
if bits[1] != 'as':
raise template.TemplateSyntaxError("first argument to '%s' tag must be 'as'" % bits[0])
return LatestFeaturedNode('coltrane.entry', 1, bits[2])
register.tag('get_featured_entries', do_featured_entries)
register.tag('get_featured_entry', do_featured_entry)
| bsd-3-clause | 7,605,183,732,226,736,000 | 29 | 96 | 0.663333 | false |
douban/pygit2 | test/test_repository.py | 1 | 20484 | # -*- coding: utf-8 -*-
#
# Copyright 2010-2014 The pygit2 contributors
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2,
# as published by the Free Software Foundation.
#
# In addition to the permissions in the GNU General Public License,
# the authors give you unlimited permission to link the compiled
# version of this file into combinations with other programs,
# and to distribute those combinations without any restriction
# coming from the use of this file. (The General Public License
# restrictions do apply in other respects; for example, they cover
# modification of the file, and distribution when not linked into
# a combined executable.)
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
"""Tests for Repository objects."""
# Import from the future
from __future__ import absolute_import
from __future__ import unicode_literals
# Import from the Standard Library
import binascii
import unittest
import tempfile
import os
from os.path import join, realpath
# Import from pygit2
from pygit2 import GIT_OBJ_ANY, GIT_OBJ_BLOB, GIT_OBJ_COMMIT
from pygit2 import GIT_MERGE_ANALYSIS_NONE, GIT_MERGE_ANALYSIS_NORMAL, GIT_MERGE_ANALYSIS_UP_TO_DATE
from pygit2 import GIT_MERGE_ANALYSIS_FASTFORWARD, GIT_MERGE_ANALYSIS_UNBORN
from pygit2 import (
init_repository, clone_repository, discover_repository,
Reference, hashfile, is_repository
)
from pygit2 import Oid
import pygit2
from . import utils
HEAD_SHA = '784855caf26449a1914d2cf62d12b9374d76ae78'
PARENT_SHA = 'f5e5aa4e36ab0fe62ee1ccc6eb8f79b866863b87' # HEAD^
BLOB_HEX = 'af431f20fc541ed6d5afede3e2dc7160f6f01f16'
BLOB_RAW = binascii.unhexlify(BLOB_HEX.encode('ascii'))
BLOB_OID = Oid(raw=BLOB_RAW)
class RepositoryTest(utils.BareRepoTestCase):
def test_is_empty(self):
self.assertFalse(self.repo.is_empty)
def test_is_bare(self):
self.assertTrue(self.repo.is_bare)
def test_head(self):
head = self.repo.head
self.assertEqual(HEAD_SHA, head.target.hex)
self.assertEqual(type(head), Reference)
self.assertFalse(self.repo.head_is_unborn)
self.assertFalse(self.repo.head_is_detached)
def test_read(self):
self.assertRaises(TypeError, self.repo.read, 123)
self.assertRaisesWithArg(KeyError, '1' * 40, self.repo.read, '1' * 40)
ab = self.repo.read(BLOB_OID)
a = self.repo.read(BLOB_HEX)
self.assertEqual(ab, a)
self.assertEqual((GIT_OBJ_BLOB, b'a contents\n'), a)
a2 = self.repo.read('7f129fd57e31e935c6d60a0c794efe4e6927664b')
self.assertEqual((GIT_OBJ_BLOB, b'a contents 2\n'), a2)
a_hex_prefix = BLOB_HEX[:4]
a3 = self.repo.read(a_hex_prefix)
self.assertEqual((GIT_OBJ_BLOB, b'a contents\n'), a3)
def test_write(self):
data = b"hello world"
# invalid object type
self.assertRaises(ValueError, self.repo.write, GIT_OBJ_ANY, data)
oid = self.repo.write(GIT_OBJ_BLOB, data)
self.assertEqual(type(oid), Oid)
def test_contains(self):
self.assertRaises(TypeError, lambda: 123 in self.repo)
self.assertTrue(BLOB_OID in self.repo)
self.assertTrue(BLOB_HEX in self.repo)
self.assertTrue(BLOB_HEX[:10] in self.repo)
self.assertFalse('a' * 40 in self.repo)
self.assertFalse('a' * 20 in self.repo)
def test_iterable(self):
l = [obj for obj in self.repo]
oid = Oid(hex=BLOB_HEX)
self.assertTrue(oid in l)
def test_lookup_blob(self):
self.assertRaises(TypeError, lambda: self.repo[123])
self.assertEqual(self.repo[BLOB_OID].hex, BLOB_HEX)
a = self.repo[BLOB_HEX]
self.assertEqual(b'a contents\n', a.read_raw())
self.assertEqual(BLOB_HEX, a.hex)
self.assertEqual(GIT_OBJ_BLOB, a.type)
def test_lookup_blob_prefix(self):
a = self.repo[BLOB_HEX[:5]]
self.assertEqual(b'a contents\n', a.read_raw())
self.assertEqual(BLOB_HEX, a.hex)
self.assertEqual(GIT_OBJ_BLOB, a.type)
def test_lookup_commit(self):
commit_sha = '5fe808e8953c12735680c257f56600cb0de44b10'
commit = self.repo[commit_sha]
self.assertEqual(commit_sha, commit.hex)
self.assertEqual(GIT_OBJ_COMMIT, commit.type)
self.assertEqual(('Second test data commit.\n\n'
'This commit has some additional text.\n'),
commit.message)
def test_lookup_commit_prefix(self):
commit_sha = '5fe808e8953c12735680c257f56600cb0de44b10'
commit_sha_prefix = commit_sha[:7]
too_short_prefix = commit_sha[:3]
commit = self.repo[commit_sha_prefix]
self.assertEqual(commit_sha, commit.hex)
self.assertEqual(GIT_OBJ_COMMIT, commit.type)
self.assertEqual(
('Second test data commit.\n\n'
'This commit has some additional text.\n'),
commit.message)
self.assertRaises(ValueError, self.repo.__getitem__, too_short_prefix)
def test_get_path(self):
directory = realpath(self.repo.path)
expected = realpath(self.repo_path)
self.assertEqual(directory, expected)
def test_get_workdir(self):
self.assertEqual(self.repo.workdir, None)
def test_revparse_single(self):
parent = self.repo.revparse_single('HEAD^')
self.assertEqual(parent.hex, PARENT_SHA)
def test_hash(self):
data = "foobarbaz"
hashed_sha1 = pygit2.hash(data)
written_sha1 = self.repo.create_blob(data)
self.assertEqual(hashed_sha1, written_sha1)
def test_hashfile(self):
data = "bazbarfoo"
tempfile_path = tempfile.mkstemp()[1]
with open(tempfile_path, 'w') as fh:
fh.write(data)
hashed_sha1 = hashfile(tempfile_path)
os.unlink(tempfile_path)
written_sha1 = self.repo.create_blob(data)
self.assertEqual(hashed_sha1, written_sha1)
class RepositoryTest_II(utils.RepoTestCase):
def test_is_empty(self):
self.assertFalse(self.repo.is_empty)
def test_is_bare(self):
self.assertFalse(self.repo.is_bare)
def test_get_path(self):
directory = realpath(self.repo.path)
expected = realpath(join(self.repo_path, '.git'))
self.assertEqual(directory, expected)
def test_get_workdir(self):
directory = realpath(self.repo.workdir)
expected = realpath(self.repo_path)
self.assertEqual(directory, expected)
def test_checkout_ref(self):
ref_i18n = self.repo.lookup_reference('refs/heads/i18n')
# checkout i18n with conflicts and default strategy should
# not be possible
self.assertRaises(pygit2.GitError, self.repo.checkout, ref_i18n)
# checkout i18n with GIT_CHECKOUT_FORCE
head = self.repo.head
head = self.repo[head.target]
self.assertTrue('new' not in head.tree)
self.repo.checkout(ref_i18n, pygit2.GIT_CHECKOUT_FORCE)
head = self.repo.head
head = self.repo[head.target]
self.assertEqual(head.hex, ref_i18n.target.hex)
self.assertTrue('new' in head.tree)
self.assertTrue('bye.txt' not in self.repo.status())
def test_checkout_index(self):
# some changes to working dir
with open(os.path.join(self.repo.workdir, 'hello.txt'), 'w') as f:
f.write('new content')
# checkout index
self.assertTrue('hello.txt' in self.repo.status())
self.repo.checkout(strategy=pygit2.GIT_CHECKOUT_FORCE)
self.assertTrue('hello.txt' not in self.repo.status())
def test_checkout_head(self):
# some changes to the index
with open(os.path.join(self.repo.workdir, 'bye.txt'), 'w') as f:
f.write('new content')
self.repo.index.add('bye.txt')
# checkout from index should not change anything
self.assertTrue('bye.txt' in self.repo.status())
self.repo.checkout(strategy=pygit2.GIT_CHECKOUT_FORCE)
self.assertTrue('bye.txt' in self.repo.status())
# checkout from head will reset index as well
self.repo.checkout('HEAD', pygit2.GIT_CHECKOUT_FORCE)
self.assertTrue('bye.txt' not in self.repo.status())
def test_merge_base(self):
commit = self.repo.merge_base(
'5ebeeebb320790caf276b9fc8b24546d63316533',
'4ec4389a8068641da2d6578db0419484972284c8')
self.assertEqual(commit.hex,
'acecd5ea2924a4b900e7e149496e1f4b57976e51')
def test_reset_hard(self):
ref = "5ebeeebb320790caf276b9fc8b24546d63316533"
with open(os.path.join(self.repo.workdir, "hello.txt")) as f:
lines = f.readlines()
self.assertTrue("hola mundo\n" in lines)
self.assertTrue("bonjour le monde\n" in lines)
self.repo.reset(
ref,
pygit2.GIT_RESET_HARD)
self.assertEqual(self.repo.head.target.hex, ref)
with open(os.path.join(self.repo.workdir, "hello.txt")) as f:
lines = f.readlines()
# Hard reset will reset the working copy too
self.assertFalse("hola mundo\n" in lines)
self.assertFalse("bonjour le monde\n" in lines)
def test_reset_soft(self):
ref = "5ebeeebb320790caf276b9fc8b24546d63316533"
with open(os.path.join(self.repo.workdir, "hello.txt")) as f:
lines = f.readlines()
self.assertTrue("hola mundo\n" in lines)
self.assertTrue("bonjour le monde\n" in lines)
self.repo.reset(
ref,
pygit2.GIT_RESET_SOFT)
self.assertEqual(self.repo.head.target.hex, ref)
with open(os.path.join(self.repo.workdir, "hello.txt")) as f:
lines = f.readlines()
# Soft reset will not reset the working copy
self.assertTrue("hola mundo\n" in lines)
self.assertTrue("bonjour le monde\n" in lines)
# soft reset will keep changes in the index
diff = self.repo.diff(cached=True)
self.assertRaises(KeyError, lambda: diff[0])
def test_reset_mixed(self):
ref = "5ebeeebb320790caf276b9fc8b24546d63316533"
with open(os.path.join(self.repo.workdir, "hello.txt")) as f:
lines = f.readlines()
self.assertTrue("hola mundo\n" in lines)
self.assertTrue("bonjour le monde\n" in lines)
self.repo.reset(
ref,
pygit2.GIT_RESET_MIXED)
self.assertEqual(self.repo.head.target.hex, ref)
with open(os.path.join(self.repo.workdir, "hello.txt")) as f:
lines = f.readlines()
# mixed reset will not reset the working copy
self.assertTrue("hola mundo\n" in lines)
self.assertTrue("bonjour le monde\n" in lines)
# mixed reset will set the index to match working copy
diff = self.repo.diff(cached=True)
self.assertTrue("hola mundo\n" in diff.patch)
self.assertTrue("bonjour le monde\n" in diff.patch)
class RepositoryTest_III(utils.RepoTestCaseForMerging):
def test_merge_none(self):
self.assertRaises(TypeError, self.repo.merge, None)
def test_merge_analysis_uptodate(self):
branch_head_hex = '5ebeeebb320790caf276b9fc8b24546d63316533'
branch_id = self.repo.get(branch_head_hex).id
analysis = self.repo.merge_analysis(branch_id)
self.assertTrue(analysis & GIT_MERGE_ANALYSIS_UP_TO_DATE)
self.assertFalse(analysis & GIT_MERGE_ANALYSIS_FASTFORWARD)
self.assertEqual({}, self.repo.status())
def test_tree_merge_uptodate(self):
branch_head_hex = 'e97b4cfd5db0fb4ebabf4f203979ca4e5d1c7c87'
branch = self.repo.get(branch_head_hex)
branch_tree = branch.tree
merge_base = self.repo.merge_base(
branch_head_hex,
self.repo.head.target.hex)
merge_base_tree = self.repo.get(merge_base.hex).tree
head_tree = self.repo.get(self.repo.head.target.hex).tree
merge_index = head_tree.merge(branch_tree, merge_base_tree)
self.assertTrue(merge_index)
self.assertFalse(merge_index.has_conflicts)
def test_repo_merge_uptodate(self):
branch_head_hex = 'e97b4cfd5db0fb4ebabf4f203979ca4e5d1c7c87'
branch_commit = self.repo.get(branch_head_hex)
head_commit = self.repo.get(self.repo.head.target.hex)
merge_index = self.repo.merge_commits(head_commit, branch_commit)
self.assertTrue(merge_index)
self.assertFalse(merge_index.has_conflicts)
def test_merge_analysis_fastforward(self):
branch_head_hex = 'e97b4cfd5db0fb4ebabf4f203979ca4e5d1c7c87'
branch_id = self.repo.get(branch_head_hex).id
analysis = self.repo.merge_analysis(branch_id)
self.assertFalse(analysis & GIT_MERGE_ANALYSIS_UP_TO_DATE)
self.assertTrue(analysis & GIT_MERGE_ANALYSIS_FASTFORWARD)
self.assertEqual({}, self.repo.status())
def test_merge_no_fastforward_no_conflicts(self):
branch_head_hex = '03490f16b15a09913edb3a067a3dc67fbb8d41f1'
branch_id = self.repo.get(branch_head_hex).id
analysis = self.repo.merge_analysis(branch_id)
self.assertFalse(analysis & GIT_MERGE_ANALYSIS_UP_TO_DATE)
self.assertFalse(analysis & GIT_MERGE_ANALYSIS_FASTFORWARD)
# Checking the index works as expected
self.assertEqual({}, self.repo.status())
self.assertEqual({}, self.repo.status())
def test_merge_no_fastforward_conflicts(self):
branch_head_hex = '1b2bae55ac95a4be3f8983b86cd579226d0eb247'
branch_id = self.repo.get(branch_head_hex).id
analysis = self.repo.merge_analysis(branch_id)
self.assertFalse(analysis & GIT_MERGE_ANALYSIS_UP_TO_DATE)
self.assertFalse(analysis & GIT_MERGE_ANALYSIS_FASTFORWARD)
self.repo.merge(branch_id)
status = pygit2.GIT_STATUS_WT_NEW | pygit2.GIT_STATUS_INDEX_DELETED
# Asking twice to assure the reference counting is correct
self.assertEqual({'.gitignore': status}, self.repo.status())
self.assertEqual({'.gitignore': status}, self.repo.status())
# Checking the index works as expected
self.repo.index.add('.gitignore')
self.repo.index.write()
self.assertEqual({'.gitignore': pygit2.GIT_STATUS_INDEX_MODIFIED}, self.repo.status())
def test_merge_invalid_hex(self):
branch_head_hex = '12345678'
self.assertRaises(KeyError, self.repo.merge, branch_head_hex)
def test_merge_already_something_in_index(self):
branch_head_hex = '03490f16b15a09913edb3a067a3dc67fbb8d41f1'
branch_oid = self.repo.get(branch_head_hex).id
with open(os.path.join(self.repo.workdir, 'inindex.txt'), 'w') as f:
f.write('new content')
self.repo.index.add('inindex.txt')
self.assertRaises(pygit2.GitError, self.repo.merge, branch_oid)
class RepositorySignatureTest(utils.RepoTestCase):
def test_default_signature(self):
config = self.repo.config
config['user.name'] = 'Random J Hacker'
config['user.email'] ='[email protected]'
sig = self.repo.default_signature
self.assertEqual('Random J Hacker', sig.name)
self.assertEqual('[email protected]', sig.email)
class NewRepositoryTest(utils.NoRepoTestCase):
def test_new_repo(self):
repo = init_repository(self._temp_dir, False)
oid = repo.write(GIT_OBJ_BLOB, "Test")
self.assertEqual(type(oid), Oid)
assert os.path.exists(os.path.join(self._temp_dir, '.git'))
class InitRepositoryTest(utils.NoRepoTestCase):
# under the assumption that repo.is_bare works
def test_no_arg(self):
repo = init_repository(self._temp_dir)
self.assertFalse(repo.is_bare)
def test_pos_arg_false(self):
repo = init_repository(self._temp_dir, False)
self.assertFalse(repo.is_bare)
def test_pos_arg_true(self):
repo = init_repository(self._temp_dir, True)
self.assertTrue(repo.is_bare)
def test_keyword_arg_false(self):
repo = init_repository(self._temp_dir, bare=False)
self.assertFalse(repo.is_bare)
def test_keyword_arg_true(self):
repo = init_repository(self._temp_dir, bare=True)
self.assertTrue(repo.is_bare)
class DiscoverRepositoryTest(utils.NoRepoTestCase):
def test_discover_repo(self):
repo = init_repository(self._temp_dir, False)
subdir = os.path.join(self._temp_dir, "test1", "test2")
os.makedirs(subdir)
self.assertEqual(repo.path, discover_repository(subdir))
class EmptyRepositoryTest(utils.EmptyRepoTestCase):
def test_is_empty(self):
self.assertTrue(self.repo.is_empty)
def test_is_base(self):
self.assertFalse(self.repo.is_bare)
def test_head(self):
self.assertTrue(self.repo.head_is_unborn)
self.assertFalse(self.repo.head_is_detached)
class CloneRepositoryTest(utils.NoRepoTestCase):
def test_clone_repository(self):
repo_path = "./test/data/testrepo.git/"
repo = clone_repository(repo_path, self._temp_dir)
self.assertFalse(repo.is_empty)
self.assertFalse(repo.is_bare)
def test_clone_bare_repository(self):
repo_path = "./test/data/testrepo.git/"
repo = clone_repository(repo_path, self._temp_dir, bare=True)
self.assertFalse(repo.is_empty)
self.assertTrue(repo.is_bare)
def test_clone_remote_name(self):
repo_path = "./test/data/testrepo.git/"
repo = clone_repository(
repo_path, self._temp_dir, remote_name="custom_remote")
self.assertFalse(repo.is_empty)
self.assertEqual(repo.remotes[0].name, "custom_remote")
# def test_clone_fetch_spec(self):
# repo_path = "./test/data/testrepo.git/"
# repo = clone_repository(
# repo_path, self._temp_dir)
# self.assertFalse(repo.is_empty)
# # FIXME: When pygit2 retrieve the fetchspec we passed to git clone.
# # fetchspec seems to be going through, but the Repository class is
# # not getting it.
# # self.assertEqual(repo.remotes[0].fetchspec, "refs/heads/test")
def test_clone_with_credentials(self):
credentials = pygit2.UserPass("libgit2", "libgit2")
repo = clone_repository(
"https://bitbucket.org/libgit2/testgitrepository.git",
self._temp_dir, credentials=credentials)
self.assertFalse(repo.is_empty)
def test_clone_push_spec(self):
repo_path = "./test/data/testrepo.git/"
repo = clone_repository(
repo_path, self._temp_dir)
self.assertFalse(repo.is_empty)
# FIXME: When pygit2 supports retrieving the pushspec parameter,
# enable this test
# not sure how to test this either... couldn't find pushspec
# self.assertEqual(repo.remotes[0].fetchspec, "refs/heads/test")
def test_clone_checkout_branch(self):
repo_path = "./test/data/testrepo.git/"
repo = clone_repository(
repo_path, self._temp_dir, checkout_branch="test"
)
self.assertFalse(repo.is_empty)
# FIXME: When pygit2 supports retrieving the current branch,
# enable this test
# self.assertEqual(repo.remotes[0].current_branch, "test")
class MergeResolveTest(utils.MergeResolveTestCase):
def test_tree_merge_conflicts(self):
ours = self.repo.revparse_single("trivial-4")
ours_tree = ours.tree
theirs = self.repo.revparse_single("trivial-4-branch")
theirs_tree = theirs.tree
merge_base = self.repo.merge_base(ours.hex,
theirs.hex)
merge_base_tree = self.repo.get(merge_base.hex).tree
merge_index = ours_tree.merge(theirs_tree, merge_base_tree)
self.assertTrue(merge_index)
self.assertTrue(merge_index.has_conflicts)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -5,802,755,868,871,716,000 | 37.145251 | 100 | 0.656464 | false |
hychrisli/PyAlgorithms | src/solutions/part2/q106_construct_bitree_in_post_order.py | 1 | 1275 | from src.base.solution import Solution
from src.tests.part2.q106_test_construct_bitree_in_post_order import ConstructBiTreeInPostOrderTestCases
from src.structures.treenode import TreeNode
class ConstructBiTreeInPostOrder(Solution):
def gen_test_cases(self):
return ConstructBiTreeInPostOrderTestCases()
def print_output(self, output):
print(output.get_tree_str())
def run_test(self, input):
return self.buildTree(input[0], input[1])
def verify_output(self, test_output, output):
return test_output.get_tree_str() == output.get_tree_str()
def buildTree(self, inorder, postorder):
"""
:type inorder: List[int]
:type postorder: List[int]
:rtype: TreeNode
"""
        def helper(ins, ine, pts, pte):
if ins > ine: return None
root = TreeNode(postorder[pte])
iroot = inorder.index(postorder[pte])
jroot = iroot - ins + pts
root.left = helper(ins, iroot - 1, pts, jroot - 1)
root.right = helper(iroot + 1, ine, jroot, pte - 1)
return root
return helper(0, len(inorder) - 1, 0, len(postorder) - 1)
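        # Worked example (sketch; assumes unique values, since
        # inorder.index is used): inorder=[9, 3, 15, 20, 7],
        # postorder=[9, 15, 7, 20, 3] -> root 3 (postorder[-1]), iroot=1;
        # helper(0, 0, 0, 0) builds the left child 9 and
        # helper(2, 4, 1, 3) builds the right child 20 with children 15 and 7.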
if __name__ == '__main__':
sol = ConstructBiTreeInPostOrder()
sol.run_tests() | apache-2.0 | -8,719,448,121,257,199,000 | 28.674419 | 104 | 0.618824 | false |
rizauddin/apgg | APGG.py | 1 | 2315 | #!/usr/bin/env python
'''
Generate a graph with weights using a student id
with a fixed, predefined number of edges.
Draw a graph with matplotlib.
Must have matplotlib and NetworkX for this to work.
'''
__author__ = """Rizauddin Saian ([email protected])"""
try:
import matplotlib.pyplot as plt
import networkx as nx
except:
raise
def generate_flow(id):
'''Generate a network flow with capacity using student id.
's': source
't': sink
Parameters:
id: Student id
Returns:
A directed network flow.
'''
strid = str(id)
# In Python 3, map returns an iterable object of type map,
    # and is not subscriptable.
# Need to force a list.
nodes = list(map(lambda n: float(n) if float(n) else 1.0, strid))
#nodes = map(lambda n: float(n) if float(n) else 1.0, strid)
L = nx.DiGraph()
L.add_edge('s','a', capacity=nodes[0])
L.add_edge('s','b', capacity=nodes[1])
L.add_edge('a','c', capacity=nodes[2])
L.add_edge('b','c', capacity=nodes[3])
L.add_edge('b','d', capacity=nodes[4])
L.add_edge('d','e', capacity=nodes[5])
L.add_edge('e','f', capacity=nodes[5])
L.add_edge('f','t', capacity=nodes[5])
L.add_edge('c','t', capacity=nodes[6])
L.add_edge('e','t', capacity=nodes[7])
return L
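    # Usage sketch (illustrative; relies on networkx's maximum_flow, which
    # reads the 'capacity' edge attribute set above):
    #   L = generate_flow(2015234031)
    #   flow_value, flow_dict = nx.maximum_flow(L, 's', 't')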
def generate_graph(id=2015234031):
'''Parameters:
id: Student id
Returns:
A graph.
'''
#Generating edges. Assign to next vertex if v_i=id_i
edges = []
    for v, digit in enumerate(map(int, str(id))):
        if v == digit:
            edges.append((v, v+1))
        else:
            edges.append((v, digit))
#initialize graph
G = nx.Graph()
#function to generate weight for an edge
getweight = lambda a, b: a*b if a*b else 1
for u, v in edges:
#add an edge
G.add_edge(u, v, weight=getweight(u, v))
return G
def draw(G, weight='weight'):
'''Draw graph G
'''
# positions for all nodes
pos = nx.random_layout(G)
# draw the graph
nx.draw(G,pos=pos, with_labels=True, with_edge_labels=True)
edge_weight=dict([((u,v,),int(d[weight]))
for u,v,d in G.edges(data=True)])
edge_labels=nx.draw_networkx_edge_labels(G,pos=pos,edge_labels=edge_weight)
| mit | -1,235,453,453,247,441,000 | 25.609195 | 79 | 0.587473 | false |
iogf/candocabot | plugins/snarf/snarf.py | 1 | 1150 | import urllib
import urllib2
import re
from ircutils.ehp import *
from htmlentitydefs import name2codepoint as entity
MAX_SIZE = 262144
def chmsg(event, server):
nicka = event['nicka']
chan = event['channel']
msg = event['msg']
"""I need to make it handle other domains """
pointer = re.search('(?P<addr>http[s]?://[^ ]*)', msg)
if not pointer:
return
opener = urllib2.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
url = opener.open(pointer.group('addr'))
data = url.read(MAX_SIZE)
html = Html()
tree = html.feed(data)
for title in Easy(tree)['title']:
title = '<%s> URL Title: %s' % (nicka, contents(title))
server.send_msg(chan, re.sub(r'\s+', ' ', title))
# Returns the text contents of the given EHP node.
def contents(node):
if node.name == DATA:
return node.data
if node.name == CODE:
return unichr(int(node.data)).encode('utf8')
if node.name == AMP:
if node.data in entity: return unichr(entity[node.data]).encode('utf8')
return '&%s;' % node.data
return ''.join(map(contents, node)) | apache-2.0 | -1,255,561,008,929,268,200 | 24.577778 | 80 | 0.609565 | false |
lpakula/django-ftp-deploy | ftp_deploy/server/views/service.py | 1 | 6971 | import json
from django.views.generic.base import View
from django.views.generic.detail import SingleObjectMixin
from django.views.generic import (ListView, UpdateView, DeleteView,
DetailView, CreateView)
from django.core.urlresolvers import reverse_lazy, reverse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib import messages
from django.db.models import Max
from django.http import HttpResponse, HttpResponseRedirect, Http404
from braces.views import JSONResponseMixin, LoginRequiredMixin
from ftp_deploy.conf import *
from ftp_deploy.models import Service
from ftp_deploy.utils.repo import commits_parser
from ftp_deploy.server.forms import ServiceForm, ServiceNotificationForm
class DashboardView(LoginRequiredMixin, ListView):
"""View for dashboard"""
model = Service
queryset = Service.objects.all().select_related().order_by(
"status", "-log__created").annotate(date=Max('log__created'))
context_object_name = 'services'
template_name = "ftp_deploy/dashboard.html"
paginate_by = 25
def post(self, request, *args, **kwargs):
services = self.get_queryset()
if self.request.POST['services']:
services = services.filter(pk=self.request.POST['services'])
return render_to_response('ftp_deploy/service/list.html', locals(),
context_instance=RequestContext(request))
class ServiceManageView(LoginRequiredMixin, DetailView):
"""View for manage services"""
model = Service
context_object_name = 'service'
template_name = "ftp_deploy/service/manage.html"
def get_context_data(self, **kwargs):
context = super(ServiceManageView, self).get_context_data(**kwargs)
context['recent_logs'] = self.object.log_set.all()[:15]
context['fail_logs'] = self.object.log_set.filter(
status=0).filter(skip=0)
return context
class ServiceAddView(LoginRequiredMixin, CreateView):
"""View for add serives"""
model = Service
form_class = ServiceForm
template_name = "ftp_deploy/service/form.html"
def form_valid(self, form):
messages.add_message(self.request, messages.SUCCESS,
'Service has been added.')
return super(ServiceAddView, self).form_valid(form)
def get_success_url(self):
self.object.validate()
self.object.save()
return reverse('ftpdeploy_service_manage',
kwargs={'pk': self.object.pk})
class ServiceEditView(LoginRequiredMixin, UpdateView):
"""View for edit services"""
model = Service
form_class = ServiceForm
template_name = "ftp_deploy/service/form.html"
def form_valid(self, form):
self.object.validate()
self.object.save()
messages.add_message(self.request, messages.SUCCESS,
'Service has been updated.')
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
return reverse('ftpdeploy_service_manage',
kwargs={'pk': self.kwargs['pk']})
class ServiceDeleteView(LoginRequiredMixin, DeleteView):
"""View for delete services"""
model = Service
success_url = reverse_lazy('ftpdeploy_dashboard')
template_name = "ftp_deploy/service/delete.html"
def delete(self, request, *args, **kwargs):
messages.add_message(request, messages.SUCCESS,
'Service has been removed.')
return super(ServiceDeleteView, self).delete(request, *args, **kwargs)
class ServiceStatusView(JSONResponseMixin, LoginRequiredMixin,
SingleObjectMixin, View):
"""View for update(save) and check service status"""
model = Service
def post(self, request, *args, **kwargs):
service = self.get_object()
service.validate()
service.save()
response = request.POST.get('response', '')
if response == 'list':
services = [service]
return render_to_response('ftp_deploy/service/list.html', locals(),
context_instance=RequestContext(request))
if response == 'manage':
manage_view = ServiceManageView()
manage_view.object = service
context = manage_view.get_context_data()
return render_to_response(
'ftp_deploy/service/manage.html',
context, context_instance=RequestContext(request))
if response == 'json':
context = {
'status': service.status,
'status_message': service.status_message,
'updated': service.updated
}
return self.render_json_response(context)
raise Http404
class ServiceRestoreView(LoginRequiredMixin, DetailView):
""""View for build restore path for service"""
model = Service
prefetch_related = ["log_set"]
template_name = "ftp_deploy/service/restore-modal.html"
def get_context_data(self, **kwargs):
context = super(ServiceRestoreView, self).get_context_data(**kwargs)
service = self.get_object()
logs = service.get_logs_tree()
# init payload dictionary
context['payload'] = json.loads(logs[0].payload)
if service.repo_source == 'bb':
context['payload']['user'] = 'Restore'
elif service.repo_source == 'gh':
context['payload']['pusher']['name'] = 'Restore'
context['service'] = service
commits = list()
for log in logs:
payload = json.loads(log.payload)
commits += payload['commits']
context['payload']['commits'] = commits
context['payload'] = json.dumps(context['payload'])
context['files_added'], context['files_modified'], context['files_removed'] = commits_parser(commits, service.repo_source).file_diff()
context['commits_info'] = commits_parser(commits, service.repo_source).commits_info()
return context
def post(self, request, *args, **kwargs):
if self.get_object().lock():
return HttpResponse(status=500)
return HttpResponse(
reverse('ftpdeploy_deploy',
kwargs={'secret_key': self.get_object().secret_key}))
class ServiceNotificationView(LoginRequiredMixin, UpdateView):
model = Service
form_class = ServiceNotificationForm
template_name = "ftp_deploy/notification/notification-modal.html"
def form_valid(self, form):
messages.add_message(self.request, messages.SUCCESS,
'Service notification has been updated.')
return super(ServiceNotificationView, self).form_valid(form)
def get_success_url(self):
return reverse('ftpdeploy_service_manage',
kwargs={'pk': self.kwargs['pk']})
| mit | -8,609,571,675,612,642,000 | 33.004878 | 142 | 0.635203 | false |
sluger/Restful-Django-Angular-Tutorial | formsettutorial/formsetapp/views.py | 1 | 5116 | from django.views.generic import CreateView, UpdateView
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.csrf import csrf_protect
from .models import Author, Book
from .forms import AuthorForm, BookFormSet, BookForm, AuthorFormSet
'''
Serverside Django views, not needed for restful services. The tutorial
was built upon formset views at first.
'''
class AddAuthorView(CreateView):
template_name = 'formsetapp/create_author.html'
form_class = AuthorForm
def get_context_data(self, **kwargs):
context = super(AddAuthorView, self).get_context_data(**kwargs)
if self.request.POST:
context['formset'] = BookFormSet(self.request.POST)
else:
context['formset'] = BookFormSet()
return context
def form_valid(self, form):
context = self.get_context_data()
formset = context['formset']
if formset.is_valid():
self.object = form.save()
formset.instance = self.object
formset.save()
return redirect('authors')
else:
return self.render_to_response(self.get_context_data(form=form))
class EditAuthorView(UpdateView):
template_name = 'formsetapp/edit_author.html'
form_class = AuthorForm
def get_object(self, queryset=None):
obj = Author.objects.get(id=self.kwargs['id'])
return obj
def get_context_data(self, **kwargs):
context = super(EditAuthorView, self).get_context_data(**kwargs)
        if self.request.method == 'POST':
            context['formset'] = BookFormSet(self.request.POST, instance=self.object)
else:
context['formset'] = BookFormSet(instance=self.object)
return context
def form_valid(self, form):
context = self.get_context_data()
print context
formset = context['formset']
if formset.is_valid():
#self.object = form.save()
#formset.instance = self.object
form.save()
formset.save()
return redirect('authors')
else:
#return self.render_to_response(self.get_context_data(form=form))
return super(EditAuthorView, self).form_valid(form)
def authors(request, template_name='formsetapp/authors.html'):
authors = Author.objects.all()
context = {}
context['object_list'] = authors
return render(request, template_name, context)
def author_delete(request, id, template_name='formsetapp/author_confirm_delete.html'):
author = get_object_or_404(Author, id=id)
if request.method=='POST':
author.delete()
return redirect('authors')
return render(request, template_name, {'author':author})
class AddBookView(CreateView):
template_name = 'formsetapp/create_book.html'
form_class = BookForm
def get_context_data(self, **kwargs):
context = super(AddBookView, self).get_context_data(**kwargs)
if self.request.POST:
context['formset'] = BookFormSet(self.request.POST)
else:
context['formset'] = BookFormSet()
return context
def form_valid(self, form):
context = self.get_context_data()
formset = context['formset']
if formset.is_valid():
self.object = form.save()
formset.instance = self.object
formset.save()
return redirect('books')
else:
return self.render_to_response(self.get_context_data(form=form))
class EditBookView(UpdateView):
template_name = 'formsetapp/edit_book.html'
form_class = BookForm
def get_object(self, queryset=None):
obj = Book.objects.get(id=self.kwargs['id'])
return obj
def get_context_data(self, **kwargs):
context = super(EditBookView, self).get_context_data(**kwargs)
        if self.request.method == 'POST':
            context['formset'] = AuthorFormSet(self.request.POST, instance=self.object)
else:
context['formset'] = AuthorFormSet(instance=self.object)
return context
def form_valid(self, form):
context = self.get_context_data()
print context
formset = context['formset']
if formset.is_valid():
#self.object = form.save()
#formset.instance = self.object
form.save()
formset.save()
return redirect('authors')
else:
#return self.render_to_response(self.get_context_data(form=form))
return super(EditBookView, self).form_valid(form)
def books(request, template_name='formsetapp/books.html'):
books = Book.objects.all()
context = {}
context['object_list'] = books
return render(request, template_name, context)
def book_delete(request, id, template_name='formsetapp/book_confirm_delete.html'):
book = get_object_or_404(Book, id=id)
if request.method=='POST':
book.delete()
return redirect('books')
return render(request, template_name, {'book':book})
def index(request, template_name='formsetapp/index.html'):
return render(request, template_name)
| mit | 1,796,619,267,745,147,600 | 33.802721 | 86 | 0.632721 | false |
jimsize/PySolFC | pysollib/gamedb.py | 1 | 29048 | #!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------##
#
# Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
# Copyright (C) 2003 Mt. Hood Playing Card Co.
# Copyright (C) 2005-2009 Skomoroh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------##
# imports
import sys
import imp
# PySol imports
from pysollib.mfxutil import Struct, print_err
from pysollib.resource import CSI
import pysollib.settings
from pysollib.mygettext import _, n_
if sys.version_info > (3,):
basestring = str
unicode = str
# ************************************************************************
# * constants
# ************************************************************************
# GameInfo constants
class GI:
# game category - these *must* match the cardset CSI.TYPE_xxx
GC_FRENCH = CSI.TYPE_FRENCH
GC_HANAFUDA = CSI.TYPE_HANAFUDA
GC_TAROCK = CSI.TYPE_TAROCK
GC_MAHJONGG = CSI.TYPE_MAHJONGG
GC_HEXADECK = CSI.TYPE_HEXADECK
GC_MUGHAL_GANJIFA = CSI.TYPE_MUGHAL_GANJIFA
GC_NAVAGRAHA_GANJIFA = CSI.TYPE_NAVAGRAHA_GANJIFA
GC_DASHAVATARA_GANJIFA = CSI.TYPE_DASHAVATARA_GANJIFA
GC_TRUMP_ONLY = CSI.TYPE_TRUMP_ONLY
# game type
GT_1DECK_TYPE = 0
GT_2DECK_TYPE = 1
GT_3DECK_TYPE = 2
GT_4DECK_TYPE = 3
GT_BAKERS_DOZEN = 4
GT_BELEAGUERED_CASTLE = 5
GT_CANFIELD = 6
GT_DASHAVATARA_GANJIFA = 7
GT_FAN_TYPE = 8
GT_FORTY_THIEVES = 9
GT_FREECELL = 10
GT_GOLF = 11
GT_GYPSY = 12
GT_HANAFUDA = 13
GT_HEXADECK = 14
GT_KLONDIKE = 15
GT_MAHJONGG = 16
GT_MATRIX = 17
GT_MEMORY = 18
GT_MONTANA = 19
GT_MUGHAL_GANJIFA = 20
GT_NAPOLEON = 21
GT_NAVAGRAHA_GANJIFA = 22
GT_NUMERICA = 23
GT_PAIRING_TYPE = 24
GT_POKER_TYPE = 25
GT_PUZZLE_TYPE = 26
GT_RAGLAN = 27
GT_ROW_TYPE = 28
GT_SIMPLE_TYPE = 29
GT_SPIDER = 30
GT_TAROCK = 31
GT_TERRACE = 32
GT_YUKON = 33
GT_SHISEN_SHO = 34
GT_CUSTOM = 40
# extra flags
GT_BETA = 1 << 12 # beta version of game driver
GT_CHILDREN = 1 << 13 # *not used*
GT_CONTRIB = 1 << 14 # contributed games under the GNU GPL
GT_HIDDEN = 1 << 15 # not visible in menus, but games can be loaded
GT_OPEN = 1 << 16
GT_ORIGINAL = 1 << 17
GT_POPULAR = 1 << 18 # *not used*
GT_RELAXED = 1 << 19
GT_SCORE = 1 << 20 # game has some type of scoring
GT_SEPARATE_DECKS = 1 << 21
GT_XORIGINAL = 1 << 22 # original games by other people, not playable
# skill level
SL_LUCK = 1
SL_MOSTLY_LUCK = 2
SL_BALANCED = 3
SL_MOSTLY_SKILL = 4
SL_SKILL = 5
#
TYPE_NAMES = {
GT_BAKERS_DOZEN: n_("Baker's Dozen"),
GT_BELEAGUERED_CASTLE: n_("Beleaguered Castle"),
GT_CANFIELD: n_("Canfield"),
GT_FAN_TYPE: n_("Fan"),
GT_FORTY_THIEVES: n_("Forty Thieves"),
GT_FREECELL: n_("FreeCell"),
GT_GOLF: n_("Golf"),
GT_GYPSY: n_("Gypsy"),
GT_KLONDIKE: n_("Klondike"),
GT_MONTANA: n_("Montana"),
GT_NAPOLEON: n_("Napoleon"),
GT_NUMERICA: n_("Numerica"),
GT_PAIRING_TYPE: n_("Pairing"),
GT_RAGLAN: n_("Raglan"),
GT_SIMPLE_TYPE: n_("Simple games"),
GT_SPIDER: n_("Spider"),
GT_TERRACE: n_("Terrace"),
GT_YUKON: n_("Yukon"),
GT_1DECK_TYPE: n_("One-Deck games"),
GT_2DECK_TYPE: n_("Two-Deck games"),
GT_3DECK_TYPE: n_("Three-Deck games"),
GT_4DECK_TYPE: n_("Four-Deck games"),
}
# SELECT_GAME_BY_TYPE = []
# for gt, name in TYPE_NAMES.items():
# if not name.endswith('games'):
# name = name+n_(' type')
# SELECT_GAME_BY_TYPE.append(
# (name, lambda gi, gt=gt: gi.si.game_type == gt))
# SELECT_GAME_BY_TYPE = tuple(SELECT_GAME_BY_TYPE)
SELECT_GAME_BY_TYPE = (
(n_("Baker's Dozen type"), lambda gi,
gt=GT_BAKERS_DOZEN: gi.si.game_type == gt),
(n_("Beleaguered Castle type"),
lambda gi, gt=GT_BELEAGUERED_CASTLE: gi.si.game_type == gt),
(n_("Canfield type"),
lambda gi, gt=GT_CANFIELD: gi.si.game_type == gt),
(n_("Fan type"), lambda gi, gt=GT_FAN_TYPE: gi.si.game_type == gt),
(n_("Forty Thieves type"),
lambda gi, gt=GT_FORTY_THIEVES: gi.si.game_type == gt),
(n_("FreeCell type"),
lambda gi, gt=GT_FREECELL: gi.si.game_type == gt),
(n_("Golf type"), lambda gi, gt=GT_GOLF: gi.si.game_type == gt),
(n_("Gypsy type"), lambda gi, gt=GT_GYPSY: gi.si.game_type == gt),
(n_("Klondike type"),
lambda gi, gt=GT_KLONDIKE: gi.si.game_type == gt),
(n_("Montana type"), lambda gi, gt=GT_MONTANA: gi.si.game_type == gt),
(n_("Napoleon type"),
lambda gi, gt=GT_NAPOLEON: gi.si.game_type == gt),
(n_("Numerica type"),
lambda gi, gt=GT_NUMERICA: gi.si.game_type == gt),
(n_("Pairing type"),
lambda gi, gt=GT_PAIRING_TYPE: gi.si.game_type == gt),
(n_("Raglan type"), lambda gi, gt=GT_RAGLAN: gi.si.game_type == gt),
(n_("Simple games"),
lambda gi, gt=GT_SIMPLE_TYPE: gi.si.game_type == gt),
(n_("Spider type"), lambda gi, gt=GT_SPIDER: gi.si.game_type == gt),
(n_("Terrace type"), lambda gi, gt=GT_TERRACE: gi.si.game_type == gt),
(n_("Yukon type"), lambda gi, gt=GT_YUKON: gi.si.game_type == gt),
(n_("One-Deck games"),
lambda gi, gt=GT_1DECK_TYPE: gi.si.game_type == gt),
(n_("Two-Deck games"),
lambda gi, gt=GT_2DECK_TYPE: gi.si.game_type == gt),
(n_("Three-Deck games"),
lambda gi, gt=GT_3DECK_TYPE: gi.si.game_type == gt),
(n_("Four-Deck games"),
lambda gi, gt=GT_4DECK_TYPE: gi.si.game_type == gt),
)
SELECT_ORIGINAL_GAME_BY_TYPE = (
(n_("French type"), lambda gi, gf=GT_ORIGINAL,
gt=(
GT_HANAFUDA,
GT_HEXADECK, GT_MUGHAL_GANJIFA, GT_NAVAGRAHA_GANJIFA,
GT_DASHAVATARA_GANJIFA, GT_TAROCK,): gi.si.game_flags & gf and
gi.si.game_type not in gt),
(n_("Ganjifa type"), lambda gi, gf=GT_ORIGINAL,
gt=(GT_MUGHAL_GANJIFA, GT_NAVAGRAHA_GANJIFA,
GT_DASHAVATARA_GANJIFA,): gi.si.game_flags & gf and
gi.si.game_type in gt),
(n_("Hanafuda type"), lambda gi, gf=GT_ORIGINAL, gt=GT_HANAFUDA:
gi.si.game_flags & gf and gi.si.game_type == gt),
(n_("Hex A Deck type"), lambda gi, gf=GT_ORIGINAL, gt=GT_HEXADECK:
gi.si.game_flags & gf and gi.si.game_type == gt),
(n_("Tarock type"), lambda gi, gf=GT_ORIGINAL, gt=GT_TAROCK:
gi.si.game_flags & gf and gi.si.game_type == gt),
)
SELECT_CONTRIB_GAME_BY_TYPE = (
(n_("French type"), lambda gi, gf=GT_CONTRIB,
gt=(GT_HANAFUDA, GT_HEXADECK, GT_MUGHAL_GANJIFA,
GT_NAVAGRAHA_GANJIFA, GT_DASHAVATARA_GANJIFA, GT_TAROCK,):
gi.si.game_flags & gf and gi.si.game_type not in gt),
(n_("Ganjifa type"), lambda gi, gf=GT_CONTRIB,
gt=(GT_MUGHAL_GANJIFA, GT_NAVAGRAHA_GANJIFA,
GT_DASHAVATARA_GANJIFA,):
gi.si.game_flags & gf and gi.si.game_type in gt),
(n_("Hanafuda type"), lambda gi, gf=GT_CONTRIB, gt=GT_HANAFUDA:
gi.si.game_flags & gf and gi.si.game_type == gt),
(n_("Hex A Deck type"), lambda gi, gf=GT_CONTRIB, gt=GT_HEXADECK:
gi.si.game_flags & gf and gi.si.game_type == gt),
(n_("Tarock type"), lambda gi, gf=GT_CONTRIB, gt=GT_TAROCK:
gi.si.game_flags & gf and gi.si.game_type == gt),
)
SELECT_ORIENTAL_GAME_BY_TYPE = (
(n_("Dashavatara Ganjifa type"), lambda gi, gt=GT_DASHAVATARA_GANJIFA:
gi.si.game_type == gt),
(n_("Ganjifa type"), lambda gi,
gt=(GT_MUGHAL_GANJIFA, GT_NAVAGRAHA_GANJIFA,
GT_DASHAVATARA_GANJIFA,): gi.si.game_type in gt),
(n_("Hanafuda type"),
lambda gi, gt=GT_HANAFUDA: gi.si.game_type == gt),
(n_("Mughal Ganjifa type"),
lambda gi, gt=GT_MUGHAL_GANJIFA: gi.si.game_type == gt),
(n_("Navagraha Ganjifa type"),
lambda gi, gt=GT_NAVAGRAHA_GANJIFA: gi.si.game_type == gt),
)
SELECT_SPECIAL_GAME_BY_TYPE = (
(n_("Shisen-Sho"), lambda gi, gt=GT_SHISEN_SHO: gi.si.game_type == gt),
(n_("Hex A Deck type"),
lambda gi, gt=GT_HEXADECK: gi.si.game_type == gt),
(n_("Matrix type"), lambda gi, gt=GT_MATRIX: gi.si.game_type == gt),
(n_("Memory type"), lambda gi, gt=GT_MEMORY: gi.si.game_type == gt),
(n_("Poker type"), lambda gi, gt=GT_POKER_TYPE: gi.si.game_type == gt),
(n_("Puzzle type"),
lambda gi, gt=GT_PUZZLE_TYPE: gi.si.game_type == gt),
(n_("Tarock type"), lambda gi, gt=GT_TAROCK: gi.si.game_type == gt),
)
# These obsolete gameids have been used in previous versions of
# PySol and are no longer supported because of internal changes
# (mainly rule changes). The game has been assigned a new id.
PROTECTED_GAMES = {
22: 106, # Double Canfield
32: 901, # La Belle Lucie (Midnight Oil)
52: 903, # Aces Up
72: 115, # Little Forty
75: 126, # Red and Black
82: 901, # La Belle Lucie (Midnight Oil)
# 155: 5034, # Mahjongg - Flying Dragon
# 156: 5035, # Mahjongg - Fortress Towers
262: 105, # Canfield
902: 88, # Trefoil
904: 68, # Lexington Harp
297: 631, # Alternation/Alternations
}
GAMES_BY_COMPATIBILITY = (
# Atari ST Patience game v2.13 (we have 10 out of 10 games)
("Atari ST Patience", (1, 3, 4, 7, 12, 14, 15, 16, 17, 39,)),
# Gnome AisleRiot 1.0.51 (we have 28 out of 32 games)
# still missing: Camelot, Clock, Thieves, Thirteen
# ("Gnome AisleRiot 1.0.51", (
# 2, 8, 11, 19, 27, 29, 33, 34, 35, 40,
# 41, 42, 43, 58, 59, 92, 93, 94, 95, 96,
# 100, 105, 111, 112, 113, 130, 200, 201,
# )),
# Gnome AisleRiot 1.4.0.1 (we have XX out of XX games)
# ("Gnome AisleRiot", (
# 1, 2, 8, 11, 19, 27, 29, 33, 34, 35, 40,
# 41, 42, 43, 58, 59, 92, 93, 94, 95, 96,
# 100, 105, 111, 112, 113, 130, 200, 201,
# )),
# Gnome AisleRiot 2.2.0 (we have 61 out of 70 games)
# still missing:
# Gay gordons, Helsinki,
# Isabel, Labyrinth, Quatorze, Thieves,
# Treize, Valentine, Yeld.
("Gnome AisleRiot", (
1, 2, 8, 9, 11, 12, 19, 24, 27, 29, 31, 33, 34, 35, 36, 40,
41, 42, 43, 45, 48, 58, 59, 67, 89, 91, 92, 93, 94, 95, 96,
100, 105, 111, 112, 113, 130, 139, 144, 146, 147, 148, 200,
201, 206, 224, 225, 229, 230, 233, 257, 258, 280, 281, 282,
283, 284, 551, 552, 553, 737,
)),
# KDE Patience 0.7.3 from KDE 1.1.2 (we have 6 out of 9 games)
# ("KDE Patience 0.7.3", (2, 7, 8, 18, 256, 903,)),
# KDE Patience 2.0 from KDE 2.1.2 (we have 11 out of 13 games)
# ("KDE Patience", (1, 2, 7, 8, 18, 19, 23, 50, 256, 261, 903,)),
# KDE Patience 2.0 from KDE 2.2beta1 (we have 12 out of 14 games)
# ("KDE Patience", (1, 2, 7, 8, 18, 19, 23, 36, 50, 256, 261, 903,)),
# KDE Patience 2.0 from KDE 3.1.1 (we have 15 out of 15 games)
("KDE Patience", (1, 2, 7, 8, 18, 19, 23, 36, 50,
256, 261, 277, 278, 279, 903,)),
# xpat2 1.06 (we have 14 out of 16 games)
# still missing: Michael's Fantasy, modCanfield
("xpat2", (
1, 2, 8, 9, 11, 31, 54, 63, 89, 105, 901, 256, 345, 903,
)),
)
GAMES_BY_INVENTORS = (
("Paul Alfille", (8,)),
("C.L. Baker", (45,)),
("David Bernazzani", (314,)),
("Gordon Bower", (763,)),
("Art Cabral", (9,)),
("Robert Harbin", (381,)),
("Robert Hogue", (22216,)),
("Charles Jewell", (220, 309,)),
("Michael Keller", (592,)),
("Fred Lunde", (459,)),
("Albert Morehead and Geoffrey Mott-Smith", (25, 42, 48, 173, 282,
303, 362, 547, 738)),
("David Parlett", (64, 98, 294, 338, 654, 674,)),
("Randy Rasa", (187, 190, 191, 192,)),
("Captain Jeffrey T. Spaulding", (400,)),
("Adam Selene", (366,)),
("John Stoneham", (201,)),
("Bryan Stout", (655,)),
("Bill Taylor", (349,)),
("Thomas Warfield", (189, 264, 300, 320, 336, 337, 359,
415, 427, 458, 495, 496, 497, 508,)),
)
GAMES_BY_PYSOL_VERSION = (
("1.00", (1, 2, 3, 4)),
("1.01", (5, 6)),
("1.02", (7, 8, 9)),
("1.03", (10, 11, 12, 13)),
("1.10", (14,)),
("1.11", (15, 16, 17)),
("2.00", (256, 257)),
("2.01", (258, 259, 260, 261)),
("2.02", (105,)),
("2.90", (18, 19, 20, 21, 106, 23, 24, 25, 26, 27,
28, 29, 30, 31, 901, 33, 34, 35, 36)),
("2.99", (37,)),
("3.00", (38, 39,
40, 41, 42, 43, 45, 46, 47, 48, 49,
50, 51, 903, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 115, 73, 74, 126, 76, 77, 78, 79,
80, 81, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
100, 101, 102, 103, 104, 107, 108,)),
("3.10", (109, 110, 111, 112, 113, 114, 116, 117, 118, 119,
120, 121, 122, 123, 124, 125, 127)),
("3.20", (128, 129, 130, 131, 132, 133, 134, 135, 136, 137,
138, 139, 140, 141, 142,
12345, 12346, 12347, 12348, 12349, 12350, 12351, 12352)),
("3.21", (143, 144)),
("3.30", (145, 146, 147, 148, 149, 150, 151)),
("3.40", (152, 153, 154)),
("4.00", (157, 158, 159, 160, 161, 162, 163, 164)),
("4.20", (165, 166, 167, 168, 169, 170, 171, 172, 173, 174,
175, 176, 177, 178)),
("4.30", (179, 180, 181, 182, 183, 184)),
("4.41", (185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
195, 196, 197, 198, 199)),
("4.60", (200, 201, 202, 203, 204, 205,
206, 207, 208, 209,
210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
220, 221, 222, 223, 224, 225, 226, 227, 228, 229,
230, 231, 232, 233, 234, 235, 236)),
("4.70", (237,)),
('fc-0.5.0', ( # moved from Ultrasol
# 121, 122, 187, 188, 189, 190, 191, 192, 194, 197, 198,
5301, 5302, 9011, 11001, 11002, 11003, 11004, 11005,
11006, 12353, 12354, 12355, 12356, 12357, 12358, 12359,
12360, 12361, 12362, 12363, 12364, 12365, 12366, 12367,
12368, 12369, 12370, 12371, 12372, 12373, 12374, 12375,
12376, 12377, 12378, 12379, 12380, 12381, 12382, 12383,
12384, 12385, 13001, 13002, 13003, 13004, 13005, 13006,
13007, 13008, 13009, 13010, 13011, 13012, 13013, 13014,
13163, 13164, 13165, 13166, 13167, 14401, 14402, 14403,
14404, 14405, 14406, 14407, 14408, 14409, 14410, 14411,
14412, 14413, 15406, 15407, 15408, 15409, 15410, 15411,
15412, 15413, 15414, 15415, 15416, 15417, 15418, 15419,
15420, 15421, 15422, 16000, 16001, 16002, 16003, 16004,
16666, 16667, 16668, 16669, 16670, 16671, 16672, 16673,
16674, 16675, 16676, 16677, 16678, 16679, 16680, 22216,
22223, 22224, 22225, 22226, 22227, 22228, 22229, 22230,
22231, 22232,)),
('fc-0.8.0', tuple(range(263, 323))), # exclude 297
('fc-0.9.0', tuple(range(323, 421))),
('fc-0.9.1', tuple(range(421, 441))),
('fc-0.9.2', tuple(range(441, 466))),
('fc-0.9.3', tuple(range(466, 661))),
('fc-0.9.4', tuple(range(661, 671))),
('fc-1.0', tuple(range(671, 711))),
('fc-1.1', tuple(range(711, 759))),
('fc-2.0', tuple(range(11011, 11014)) + tuple(range(759, 767))),
)
# deprecated - the correct way is to or a GI.GT_XXX flag
# in the registerGame() call
_CHILDREN_GAMES = [16, 33, 55, 90, 91, 96, 97, 176, 903, ]
_OPEN_GAMES = []
_POPULAR_GAMES = [
1, # Gypsy
2, # Klondike
        7,    # Picture Gallery
8, # FreeCell
9, # Seahaven Towers
11, # Spider
12, # Braid
13, # Forty Thieves
14, # Grounds for a Divorce
19, # Yukon
31, # Baker's Dozen
36, # Golf
38, # Pyramid
105, # Canfield
158, # Imperial Trumps
279, # Kings
        903,  # Aces Up
5034, # Mahjongg Flying Dragon
5401, # Mahjongg Taipei
12345, # Oonsoo
]
# ************************************************************************
# * core games database
# ************************************************************************
class GameInfoException(Exception):
pass
class GameInfo(Struct):
def __init__(self, id, gameclass, name,
game_type, decks, redeals,
skill_level=None,
# keyword arguments:
si={}, category=0,
short_name=None, altnames=(),
suits=list(range(4)), ranks=list(range(13)), trumps=(),
rules_filename=None,
):
#
def to_unicode(s):
if isinstance(s, unicode):
return s
try:
s = unicode(s, 'utf-8')
except UnicodeDecodeError as err:
print_err(err)
s = unicode(s, 'utf-8', 'ignore')
return s
ncards = decks * (len(suits) * len(ranks) + len(trumps))
game_flags = game_type & ~1023
game_type = game_type & 1023
name = to_unicode(name)
en_name = name # for app.getGameRulesFilename
if pysollib.settings.TRANSLATE_GAME_NAMES:
name = _(name)
if not short_name:
short_name = name
else:
short_name = to_unicode(short_name)
if pysollib.settings.TRANSLATE_GAME_NAMES:
short_name = _(short_name)
if isinstance(altnames, basestring):
altnames = (altnames,)
altnames = [to_unicode(n) for n in altnames]
if pysollib.settings.TRANSLATE_GAME_NAMES:
altnames = [_(n) for n in altnames]
#
if not (1 <= category <= 9):
if game_type == GI.GT_HANAFUDA:
category = GI.GC_HANAFUDA
elif game_type == GI.GT_TAROCK:
category = GI.GC_TAROCK
elif game_type == GI.GT_MAHJONGG:
category = GI.GC_MAHJONGG
elif game_type == GI.GT_HEXADECK:
category = GI.GC_HEXADECK
elif game_type == GI.GT_MUGHAL_GANJIFA:
category = GI.GC_MUGHAL_GANJIFA
elif game_type == GI.GT_NAVAGRAHA_GANJIFA:
category = GI.GC_NAVAGRAHA_GANJIFA
elif game_type == GI.GT_DASHAVATARA_GANJIFA:
category = GI.GC_DASHAVATARA_GANJIFA
else:
category = GI.GC_FRENCH
#
if not (1 <= id <= 999999):
raise GameInfoException(name+": invalid game ID "+str(id))
if category == GI.GC_MAHJONGG:
if decks % 4:
raise GameInfoException(name+": invalid number of decks " +
str(id))
else:
if not (1 <= decks <= 4):
raise GameInfoException(
name+": invalid number of decks "+str(id))
if not name:
raise GameInfoException(name+": invalid game name")
if GI.PROTECTED_GAMES.get(id):
raise GameInfoException(name+": protected game ID "+str(id))
#
for f, l in ((GI.GT_CHILDREN, GI._CHILDREN_GAMES),
(GI.GT_OPEN, GI._OPEN_GAMES),
(GI.GT_POPULAR, GI._POPULAR_GAMES)):
if (game_flags & f) and (id not in l):
l.append(id)
elif not (game_flags & f) and (id in l):
game_flags = game_flags | f
# si is the SelectionInfo struct that will be queried by
# the "select game" dialogs. It can be freely modified.
gi_si = Struct(game_type=game_type, game_flags=game_flags,
decks=decks, redeals=redeals, ncards=ncards)
gi_si.update(si)
#
Struct.__init__(self, id=id, gameclass=gameclass,
name=name, short_name=short_name,
altnames=tuple(altnames), en_name=en_name,
decks=decks, redeals=redeals, ncards=ncards,
category=category, skill_level=skill_level,
suits=tuple(suits), ranks=tuple(ranks),
trumps=tuple(trumps),
si=gi_si, rules_filename=rules_filename)
class GameManager:
def __init__(self):
self.__selected_key = -1
self.__games = {}
self.__gamenames = {}
self.__games_by_id = None
self.__games_by_name = None
self.__games_by_short_name = None
self.__games_by_altname = None
self.__all_games = {} # includes hidden games
self.__all_gamenames = {} # includes hidden games
self.__games_for_solver = []
self.check_game = True
self.current_filename = None
self.registered_game_types = {}
self.callback = None # update progress-bar (see main.py)
self._num_games = 0 # for callback only
def setCallback(self, func):
self.callback = func
def getSelected(self):
return self.__selected_key
def setSelected(self, gameid):
assert gameid in self.__all_games
self.__selected_key = gameid
def get(self, key):
return self.__all_games.get(key)
def _check_game(self, gi):
# print 'check game:', gi.id, gi.short_name.encode('utf-8')
if gi.id in self.__all_games:
raise GameInfoException("duplicate game ID %s: %s and %s" %
(gi.id, str(gi.gameclass),
str(self.__all_games[gi.id].gameclass)))
if gi.name in self.__all_gamenames:
gameclass = self.__all_gamenames[gi.name].gameclass
raise GameInfoException("duplicate game name %s: %s and %s" %
(gi.name, str(gi.gameclass),
str(gameclass)))
if 1:
for id, game in self.__all_games.items():
if gi.gameclass is game.gameclass:
raise GameInfoException(
"duplicate game class %s: %s and %s" %
(gi.id, str(gi.gameclass), str(game.gameclass)))
for n in gi.altnames:
if n in self.__all_gamenames:
raise GameInfoException("duplicate game altname %s: %s" %
(gi.id, n))
def register(self, gi):
# print gi.id, gi.short_name.encode('utf-8')
if not isinstance(gi, GameInfo):
raise GameInfoException("wrong GameInfo class")
if self.check_game and pysollib.settings.CHECK_GAMES:
self._check_game(gi)
# if 0 and gi.si.game_flags & GI.GT_XORIGINAL:
# return
# print gi.id, gi.name
self.__all_games[gi.id] = gi
self.__all_gamenames[gi.name] = gi
for n in gi.altnames:
self.__all_gamenames[n] = gi
if not (gi.si.game_flags & GI.GT_HIDDEN):
self.__games[gi.id] = gi
self.__gamenames[gi.name] = gi
for n in gi.altnames:
self.__gamenames[n] = gi
# invalidate sorted lists
self.__games_by_id = None
self.__games_by_name = None
# update registry
k = gi.si.game_type
self.registered_game_types[k] = \
self.registered_game_types.get(k, 0) + 1
# if not gi.si.game_type == GI.GT_MAHJONGG:
# for v, k in GI.GAMES_BY_PYSOL_VERSION:
# if gi.id in k: break
# else:
# print gi.id
if hasattr(gi.gameclass, 'Solver_Class') and \
gi.gameclass.Solver_Class is not None:
self.__games_for_solver.append(gi.id)
if self.current_filename is not None:
gi.gameclass.MODULE_FILENAME = self.current_filename
if self.callback and self._num_games % 10 == 0:
self.callback()
self._num_games += 1
#
# access games database - we do not expose hidden games
#
def getAllGames(self):
# return self.__all_games
return list(self.__games.values())
def getGamesIdSortedById(self):
if self.__games_by_id is None:
lst = list(self.__games.keys())
lst.sort()
self.__games_by_id = tuple(lst)
return self.__games_by_id
def getGamesIdSortedByName(self):
if self.__games_by_name is None:
l1, l2, l3 = [], [], []
for id, gi in self.__games.items():
name = gi.name # .lower()
l1.append((name, id))
if gi.name != gi.short_name:
name = gi.short_name # .lower()
l2.append((name, id))
for n in gi.altnames:
name = n # .lower()
l3.append((name, id, n))
l1.sort()
l2.sort()
l3.sort()
self.__games_by_name = tuple([i[1] for i in l1])
self.__games_by_short_name = tuple([i[1] for i in l2])
self.__games_by_altname = tuple([i[1:] for i in l3])
return self.__games_by_name
def getGamesIdSortedByShortName(self):
if self.__games_by_name is None:
self.getGamesIdSortedByName()
return self.__games_by_short_name
# note: this contains tuples as entries
def getGamesTuplesSortedByAlternateName(self):
if self.__games_by_name is None:
self.getGamesIdSortedByName()
return self.__games_by_altname
# find game by name
def getGameByName(self, name):
gi = self.__all_gamenames.get(name)
if gi:
return gi.id
return None
def getGamesForSolver(self):
return self.__games_for_solver
# ************************************************************************
# *
# ************************************************************************
# the global game database (the single instance of class GameManager)
GAME_DB = GameManager()
def registerGame(gameinfo):
GAME_DB.register(gameinfo)
return gameinfo
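# Illustrative sketch (game class, id and arguments are hypothetical): a game
# module typically builds a GameInfo and hands it to registerGame() at import
# time, e.g.
#
#   registerGame(GameInfo(2, Klondike, "Klondike",
#                         GI.GT_KLONDIKE, 1, -1, GI.SL_BALANCED))
#
# loadGame() below imports such a module, which runs its registerGame() calls
# and thereby fills GAME_DB.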
def loadGame(modname, filename, check_game=False):
# print "load game", modname, filename
GAME_DB.check_game = check_game
GAME_DB.current_filename = filename
imp.load_source(modname, filename)
# execfile(filename, globals(), globals())
GAME_DB.current_filename = None
| gpl-3.0 | 4,920,427,938,661,608,000 | 39.970381 | 79 | 0.499002 | false |
ybenitezf/nstock | modules/app.py | 1 | 7648 | # coding: utf-8
from content_plugin import ContentPlugin
from z_whoosh import Whoosh
from gluon import current, URL
from gluon.storage import Storage
from gluon.cache import Cache
import perms
class Application(object):
def __init__(self):
super(Application, self).__init__()
# copy current context
self.db = current.db
self.T = current.T
self.auth = current.auth
self.request = current.request
self.response = current.response
self.session = current.session
self.mail = current.mail
self.conf = current.conf
self.registry = Storage()
self.cache = Cache(self.request)
def registerContentType(self, item_type, plug):
"""
Register a ContentPlugin for an Item Type
"""
assert isinstance(plug, ContentPlugin)
self.registry[item_type] = plug
plug.setController(self)
def getContentType(self, item_type):
return self.registry[item_type]
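    # Illustrative sketch (plugin class and type name are hypothetical): a
    # content plugin is registered once per item type and later looked up by
    # that same type, e.g.
    #
    #   app = Application()
    #   app.registerContentType('text', TextContentPlugin())
    #   plug = app.getContentType('text')
    #
    # where TextContentPlugin subclasses ContentPlugin.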
def getItemByUUID(self, unique_id):
db = self.db
query = (db.item.unique_id == unique_id)
item = db(query).select().first()
return item
def exportItem(self, item_id, export_dir):
"""
        Write all of the item's meta-data and content into export_dir
"""
import os.path
import os
item = self.getItemByUUID(item_id)
meta_file = os.path.join(export_dir, "meta.json")
with open(meta_file, 'w') as f:
f.write(item.as_json())
ct = self.getContentType(item.item_type)
os.mkdir(os.path.join(export_dir, "content"))
ct.export(item, os.path.join(export_dir, "content"))
def canUpdateItem(self, unique_id, user=None):
item = self.getItemByUUID(unique_id)
desk = self.db(
self.db.desk.item_list.contains(item.id)).select().first()
is_owner = self.isOwner(unique_id, user=user) and (
desk.id == self.getUserDesk().id)
can_update_desk = self.auth.has_permission(
'update_items', self.db.desk, desk.id) or self.auth.has_permission(
'owner', self.db.desk, desk.id) or self.auth.has_permission(
'update', self.db.desk, desk.id)
return (is_owner or can_update_desk) and (item.id in desk.item_list)
def canReadItem(self, unique_id, user=None):
item = self.getItemByUUID(unique_id)
desk = self.db(
self.db.desk.item_list.contains(item.id)).select().first()
can_read_desk = self.auth.has_permission(
'read', self.db.desk, desk.id) or self.auth.has_permission(
'owner', self.db.desk, desk.id) or self.auth.has_permission(
'update', self.db.desk, desk.id)
return can_read_desk and (item.id in desk.item_list)
def isOwner(self, unique_id, user=None):
"""
Returns True if user is the owner of the item
"""
item = self.getItemByUUID(unique_id)
if item is None:
return False
if user is None:
return perms.isOwner(item.id)
return self.auth.has_permission(
'owner', self.db.item, record_id=item.id, user_id=user.id)
def getUserDesk(self, user=None):
db = self.db
auth = self.auth
if user is None:
user = auth.user
# setup user desk if necessary.
user_desk = db(
auth.accessible_query('owner', db.desk, user.id)).select().first()
if user_desk is None:
            name = self.T("%s desk", (user.first_name,))
            desk_id = db.desk.insert(name=name)
            g_id = auth.user_group(user.id)
auth.add_permission(g_id, 'owner', db.desk, desk_id)
user_desk = db.desk(desk_id)
return user_desk
def indexItem(self, item_id, user=None):
"""
Add/update item to the user search index
"""
if user is None:
user = self.auth.user
item = self.getItemByUUID(item_id)
ct = self.getContentType(item.item_type)
text = ct.get_full_text(item)
w = Whoosh(str(user.id))
w.add_to_index(unicode(item_id), text)
def createItem(self, content_type, values):
db = self.db
auth = self.auth
values['item_type'] = content_type
item_id = db.item.insert(**db.item._filter_fields(values))
# give owner perm to the item
auth.add_permission(0, 'owner', db.item, item_id)
# add the item to the user desk
user_desk = self.getUserDesk()
item_list = user_desk.item_list
item_list.insert(0, item_id)
user_desk.update_record(item_list=item_list)
# --
        # create the content instance
ct = self.getContentType(content_type)
ct.create_content(db.item(item_id))
# --
return db.item(item_id).unique_id
def getItemURL(self, unique_id):
item = self.getItemByUUID(unique_id)
c = "plugin_{}".format(item.item_type)
f = "index.html"
return URL(c=c, f=f, args=[item.unique_id])
def getContentChangesURL(self, unique_id):
item = self.getItemByUUID(unique_id)
c = "plugin_{}".format(item.item_type)
f = "changelog.html"
return URL(c=c, f=f, args=[item.unique_id])
def notifyChanges(self, item_id):
response = self.response
auth = self.auth
T = self.T
item = self.getItemByUUID(item_id)
message = response.render(
'changes_email.txt',
dict(item=item, user=auth.user)
)
subject = T("Changes on %s") % (item.headline,)
self.notifyCollaborators(
item.unique_id,
subject,
message
)
def getCollaborators(self, item_id, exclude_current=True):
"""
        Given an item, returns the list of users who have access to it.
"""
db = self.db
auth = self.auth
item = self.getItemByUUID(item_id)
desk = self.db(
self.db.desk.item_list.contains(item.id)).select().first()
query = (db.auth_permission.record_id == desk.id)
query &= (db.auth_permission.name != 'push_items')
query &= (db.auth_permission.table_name == db.desk)
query &= (db.auth_permission.group_id == db.auth_membership.group_id)
query &= (db.auth_user.id == db.auth_membership.user_id)
if exclude_current:
query &= (db.auth_user.id != auth.user.id)
return db(query).select(
db.auth_user.ALL,
distinct=True,
cache=(self.cache.ram, 30),
cacheable=True)
def notifyCollaborators(self, item_id, subject, message):
db = self.db
auth = self.auth
item = self.getItemByUUID(item_id)
myusers = self.getCollaborators(item.unique_id)
for u in myusers:
db.notification.insert(
subject=subject,
message_content=message,
from_user=auth.user.id,
to_user=u.id
)
def shareItem(self, item_id, src_desk, dst_desk):
"""
Move item_id from src_desk to dst_desk
"""
item = self.getItemByUUID(item_id)
src = self.db.desk(src_desk)
dst = self.db.desk(dst_desk)
src_list = src.item_list
src_list.remove(item.id)
src.update_record(item_list=src_list)
dst_list = dst.item_list
dst_list.insert(0, item.id)
dst.update_record(item_list=dst_list)
self.notifyChanges(item_id)
return
| mit | -3,492,867,501,608,093,000 | 31.683761 | 79 | 0.574922 | false |
li-ma/homework | zookeeper/kazoo-pub.py | 1 | 1154 | import random
import kazoo
from kazoo.client import KazooClient
from kazoo.handlers.eventlet import SequentialEventletHandler
from kazoo.retry import KazooRetry
from kazoo.recipe.watchers import ChildrenWatch
from oslo_serialization import jsonutils
_handler = SequentialEventletHandler()
_retry = KazooRetry(max_tries=3, delay=0.5, backoff=2,
sleep_func=_handler.sleep_func)
client = KazooClient(hosts='192.168.163.129:2181',
handler=_handler,
timeout=30,
connection_retry=_retry)
#import pdb
#pdb.set_trace()
abc = {'name': '99'}
node = str(random.randint(10, 1000))
client.start()
lports = client.get_children('/openstack/lport/')
# client.create('/openstack/lport/%s' % node, jsonutils.dumps(abc))
for lport in lports:
value,state = client.get('/openstack/lport/%s' % lport)
json_val = jsonutils.loads(value)
if json_val['name']:
json_val['name'] = str(int(json_val['name']) + 1)
else:
json_val['name'] = '0'
client.set('/openstack/lport/%s' % lport, jsonutils.dumps(json_val))
print "%s: %s" % (lport, json_val['name'])
| apache-2.0 | 1,515,694,696,601,710,000 | 31.055556 | 72 | 0.657712 | false |
beeverycreative/BEEweb | src/octoprint/util/__init__.py | 1 | 34374 | # coding=utf-8
"""
This module bundles commonly used utility methods or helper classes that are used in multiple places within
OctoPrint's source code.
"""
from __future__ import absolute_import, division, print_function
__author__ = "Gina Häußge <[email protected]>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
import os
import traceback
import sys
import re
import tempfile
import logging
import shutil
import threading
from functools import wraps
import warnings
import contextlib
try:
import queue
except ImportError:
import Queue as queue
logger = logging.getLogger(__name__)
def warning_decorator_factory(warning_type):
def specific_warning(message, stacklevel=1, since=None, includedoc=None, extenddoc=False):
def decorator(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
# we need to increment the stacklevel by one because otherwise we'll get the location of our
# func_wrapper in the log, instead of our caller (which is the real caller of the wrapped function)
warnings.warn(message, warning_type, stacklevel=stacklevel + 1)
return func(*args, **kwargs)
if includedoc is not None and since is not None:
docstring = "\n.. deprecated:: {since}\n {message}\n\n".format(since=since, message=includedoc)
if extenddoc and hasattr(func_wrapper, "__doc__") and func_wrapper.__doc__ is not None:
docstring = func_wrapper.__doc__ + "\n" + docstring
func_wrapper.__doc__ = docstring
return func_wrapper
return decorator
return specific_warning
deprecated = warning_decorator_factory(DeprecationWarning)
"""
A decorator for deprecated methods. Logs a deprecation warning via Python's `:mod:`warnings` module including the
supplied ``message``. The call stack level used (for adding the source location of the offending call to the
warning) can be overridden using the optional ``stacklevel`` parameter. If both ``since`` and ``includedoc`` are
provided, a deprecation warning will also be added to the function's docstring by providing or extending its ``__doc__``
property.
Arguments:
message (string): The message to include in the deprecation warning.
stacklevel (int): Stack level for including the caller of the offending method in the logged warning. Defaults to 1,
meaning the direct caller of the method. It might make sense to increase this in case of the function call
happening dynamically from a fixed position to not shadow the real caller (e.g. in case of overridden
``getattr`` methods).
includedoc (string): Message about the deprecation to include in the wrapped function's docstring.
extenddoc (boolean): If True the original docstring of the wrapped function will be extended by the deprecation
message, if False (default) it will be replaced with the deprecation message.
since (string): Version since when the function was deprecated, must be present for the docstring to get extended.
Returns:
function: The wrapped function with the deprecation warnings in place.
"""
pending_deprecation = warning_decorator_factory(PendingDeprecationWarning)
"""
A decorator for methods pending deprecation. Logs a pending deprecation warning via Python's `:mod:`warnings` module
including the supplied ``message``. The call stack level used (for adding the source location of the offending call to
the warning) can be overridden using the optional ``stacklevel`` parameter. If both ``since`` and ``includedoc`` are
provided, a deprecation warning will also be added to the function's docstring by providing or extending its ``__doc__``
property.
Arguments:
message (string): The message to include in the deprecation warning.
stacklevel (int): Stack level for including the caller of the offending method in the logged warning. Defaults to 1,
meaning the direct caller of the method. It might make sense to increase this in case of the function call
happening dynamically from a fixed position to not shadow the real caller (e.g. in case of overridden
``getattr`` methods).
extenddoc (boolean): If True the original docstring of the wrapped function will be extended by the deprecation
message, if False (default) it will be replaced with the deprecation message.
includedoc (string): Message about the deprecation to include in the wrapped function's docstring.
since (string): Version since when the function was deprecated, must be present for the docstring to get extended.
Returns:
function: The wrapped function with the deprecation warnings in place.
"""
def get_formatted_size(num):
"""
Formats the given byte count as a human readable rounded size expressed in the most pressing unit among B(ytes),
K(ilo)B(ytes), M(ega)B(ytes), G(iga)B(ytes) and T(era)B(ytes), with one decimal place.
Based on http://stackoverflow.com/a/1094933/2028598
Arguments:
num (int): The byte count to format
Returns:
string: The formatted byte count.
"""
for x in ["B","KB","MB","GB"]:
if num < 1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, "TB")
def is_allowed_file(filename, extensions):
"""
Determines if the provided ``filename`` has one of the supplied ``extensions``. The check is done case-insensitive.
Arguments:
filename (string): The file name to check against the extensions.
extensions (list): The extensions to check against, a list of strings
Return:
boolean: True if the file name's extension matches one of the allowed extensions, False otherwise.
"""
return "." in filename and filename.rsplit(".", 1)[1].lower() in map(str.lower, extensions)
def get_formatted_timedelta(d):
"""
Formats a timedelta instance as "HH:MM:ss" and returns the resulting string.
Arguments:
d (datetime.timedelta): The timedelta instance to format
Returns:
string: The timedelta formatted as "HH:MM:ss"
"""
if d is None:
return None
hours = d.days * 24 + d.seconds // 3600
minutes = (d.seconds % 3600) // 60
seconds = d.seconds % 60
return "%02d:%02d:%02d" % (hours, minutes, seconds)
def get_formatted_datetime(d):
"""
Formats a datetime instance as "YYYY-mm-dd HH:MM" and returns the resulting string.
Arguments:
d (datetime.datetime): The datetime instance to format
Returns:
string: The datetime formatted as "YYYY-mm-dd HH:MM"
"""
if d is None:
return None
return d.strftime("%Y-%m-%d %H:%M")
def get_class(name):
"""
Retrieves the class object for a given fully qualified class name.
Taken from http://stackoverflow.com/a/452981/2028598.
Arguments:
name (string): The fully qualified class name, including all modules separated by ``.``
Returns:
type: The class if it could be found.
Raises:
AttributeError: The class could not be found.
"""
parts = name.split(".")
module = ".".join(parts[:-1])
m = __import__(module)
for comp in parts[1:]:
m = getattr(m, comp)
return m
def get_exception_string():
"""
Retrieves the exception info of the last raised exception and returns it as a string formatted as
``<exception type>: <exception message> @ <source file>:<function name>:<line number>``.
Returns:
string: The formatted exception information.
"""
locationInfo = traceback.extract_tb(sys.exc_info()[2])[0]
return "%s: '%s' @ %s:%s:%d" % (str(sys.exc_info()[0].__name__), str(sys.exc_info()[1]), os.path.basename(locationInfo[0]), locationInfo[2], locationInfo[1])
@deprecated("get_free_bytes has been deprecated and will be removed in the future",
includedoc="Replaced by `psutil.disk_usage <http://pythonhosted.org/psutil/#psutil.disk_usage>`_.",
since="1.2.5")
def get_free_bytes(path):
import psutil
return psutil.disk_usage(path).free
def get_dos_filename(input, existing_filenames=None, extension=None, whitelisted_extensions=None, **kwargs):
"""
Converts the provided input filename to a 8.3 DOS compatible filename. If ``existing_filenames`` is provided, the
conversion result will be guaranteed not to collide with any of the filenames provided thus.
Uses :func:`find_collision_free_name` internally.
Arguments:
input (string): The original filename incl. extension to convert to the 8.3 format.
existing_filenames (list): A list of existing filenames with which the generated 8.3 name must not collide.
Optional.
extension (string): The .3 file extension to use for the generated filename. If not provided, the extension of
the provided ``filename`` will simply be truncated to 3 characters.
whitelisted_extensions (list): A list of extensions on ``input`` that will be left as-is instead of
exchanging for ``extension``.
kwargs (dict): Additional keyword arguments to provide to :func:`find_collision_free_name`.
Returns:
string: A 8.3 compatible translation of the original filename, not colliding with the optionally provided
``existing_filenames`` and with the provided ``extension`` or the original extension shortened to
a maximum of 3 characters.
Raises:
ValueError: No 8.3 compatible name could be found that doesn't collide with the provided ``existing_filenames``.
Examples:
>>> get_dos_filename("test1234.gco")
u'test1234.gco'
>>> get_dos_filename("test1234.gcode")
u'test1234.gco'
>>> get_dos_filename("test12345.gco")
u'test12~1.gco'
>>> get_dos_filename("test1234.fnord", extension="gco")
u'test1234.gco'
>>> get_dos_filename("auto0.g", extension="gco")
u'auto0.gco'
>>> get_dos_filename("auto0.g", extension="gco", whitelisted_extensions=["g"])
u'auto0.g'
>>> get_dos_filename(None)
>>> get_dos_filename("foo")
u'foo'
"""
if input is None:
return None
if existing_filenames is None:
existing_filenames = []
if extension is not None:
extension = extension.lower()
if whitelisted_extensions is None:
whitelisted_extensions = []
filename, ext = os.path.splitext(input)
ext = ext.lower()
ext = ext[1:] if ext.startswith(".") else ext
if ext in whitelisted_extensions or extension is None:
extension = ext
return find_collision_free_name(filename, extension, existing_filenames, **kwargs)
def find_collision_free_name(filename, extension, existing_filenames, max_power=2):
"""
Tries to find a collision free translation of "<filename>.<extension>" to the 8.3 DOS compatible format,
preventing collisions with any of the ``existing_filenames``.
First strips all of ``."/\\[]:;=,`` from the filename and extensions, converts them to lower case and truncates
the ``extension`` to a maximum length of 3 characters.
If the filename is already equal or less than 8 characters in length after that procedure and "<filename>.<extension>"
are not contained in the ``existing_files``, that concatenation will be returned as the result.
If not, the following algorithm will be applied to try to find a collision free name::
set counter := power := 1
while counter < 10^max_power:
set truncated := substr(filename, 0, 6 - power + 1) + "~" + counter
set result := "<truncated>.<extension>"
if result is collision free:
return result
counter++
if counter >= 10 ** power:
power++
raise ValueError
This will basically -- for a given original filename of ``some_filename`` and an extension of ``gco`` -- iterate
through names of the format ``some_f~1.gco``, ``some_f~2.gco``, ..., ``some_~10.gco``, ``some_~11.gco``, ...,
``<prefix>~<n>.gco`` for ``n`` less than 10 ^ ``max_power``, returning as soon as one is found that is not colliding.
Arguments:
filename (string): The filename without the extension to convert to 8.3.
extension (string): The extension to convert to 8.3 -- will be truncated to 3 characters if it's longer than
that.
existing_filenames (list): A list of existing filenames to prevent name collisions with.
max_power (int): Limits the possible attempts of generating a collision free name to 10 ^ ``max_power``
variations. Defaults to 2, so the name generation will maximally reach ``<name>~99.<ext>`` before
aborting and raising an exception.
Returns:
string: A 8.3 representation of the provided original filename, ensured to not collide with the provided
``existing_filenames``
Raises:
ValueError: No collision free name could be found.
Examples:
>>> find_collision_free_name("test1234", "gco", [])
u'test1234.gco'
>>> find_collision_free_name("test1234", "gcode", [])
u'test1234.gco'
>>> find_collision_free_name("test12345", "gco", [])
u'test12~1.gco'
>>> find_collision_free_name("test 123", "gco", [])
u'test_123.gco'
>>> find_collision_free_name("test1234", "g o", [])
u'test1234.g_o'
>>> find_collision_free_name("test12345", "gco", ["test12~1.gco"])
u'test12~2.gco'
>>> many_files = ["test12~{}.gco".format(x) for x in range(10)[1:]]
>>> find_collision_free_name("test12345", "gco", many_files)
u'test1~10.gco'
>>> many_more_files = many_files + ["test1~{}.gco".format(x) for x in range(10, 99)]
>>> find_collision_free_name("test12345", "gco", many_more_files)
u'test1~99.gco'
>>> many_more_files_plus_one = many_more_files + ["test1~99.gco"]
>>> find_collision_free_name("test12345", "gco", many_more_files_plus_one)
Traceback (most recent call last):
...
ValueError: Can't create a collision free filename
>>> find_collision_free_name("test12345", "gco", many_more_files_plus_one, max_power=3)
u'test~100.gco'
"""
if not isinstance(filename, unicode):
filename = unicode(filename)
if not isinstance(extension, unicode):
extension = unicode(extension)
def make_valid(text):
return re.sub(r"\s+", "_", text.translate({ord(i):None for i in ".\"/\\[]:;=,"})).lower()
filename = make_valid(filename)
extension = make_valid(extension)
extension = extension[:3] if len(extension) > 3 else extension
full_name_format = u"{filename}.{extension}" if extension else u"{filename}"
result = full_name_format.format(filename=filename,
extension=extension)
if len(filename) <= 8 and not result in existing_filenames:
# early exit
return result
counter = 1
power = 1
prefix_format = u"{segment}~{counter}"
while counter < (10 ** max_power):
prefix = prefix_format.format(segment=filename[:(6 - power + 1)], counter=str(counter))
result = full_name_format.format(filename=prefix,
extension=extension)
if result not in existing_filenames:
return result
counter += 1
if counter >= 10 ** power:
power += 1
raise ValueError("Can't create a collision free filename")
def silent_remove(file):
"""
Silently removes a file. Does not raise an error if the file doesn't exist.
Arguments:
file (string): The path of the file to be removed
"""
try:
os.remove(file)
except OSError:
pass
def sanitize_ascii(line):
if not isinstance(line, basestring):
raise ValueError("Expected either str or unicode but got {} instead".format(line.__class__.__name__ if line is not None else None))
return to_unicode(line, encoding="ascii", errors="replace").rstrip()
def filter_non_ascii(line):
"""
Filter predicate to test if a line contains non ASCII characters.
Arguments:
line (string): The line to test
Returns:
boolean: True if the line contains non ASCII characters, False otherwise.
"""
try:
to_str(to_unicode(line, encoding="ascii"), encoding="ascii")
return False
except ValueError:
return True
def to_str(s_or_u, encoding="utf-8", errors="strict"):
"""Make sure ``s_or_u`` is a str."""
if isinstance(s_or_u, unicode):
return s_or_u.encode(encoding, errors=errors)
else:
return s_or_u
def to_unicode(s_or_u, encoding="utf-8", errors="strict"):
"""Make sure ``s_or_u`` is a unicode string."""
if isinstance(s_or_u, str):
return s_or_u.decode(encoding, errors=errors)
else:
return s_or_u
def chunks(l, n):
"""
Yield successive n-sized chunks from l.
Taken from http://stackoverflow.com/a/312464/2028598
"""
for i in range(0, len(l), n):
yield l[i:i+n]
def is_running_from_source():
root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
return os.path.isdir(os.path.join(root, "src")) and os.path.isfile(os.path.join(root, "setup.py"))
def dict_merge(a, b):
"""
Recursively deep-merges two dictionaries.
Taken from https://www.xormedia.com/recursively-merge-dictionaries-in-python/
Example::
>>> a = dict(foo="foo", bar="bar", fnord=dict(a=1))
>>> b = dict(foo="other foo", fnord=dict(b=2, l=["some", "list"]))
>>> expected = dict(foo="other foo", bar="bar", fnord=dict(a=1, b=2, l=["some", "list"]))
>>> dict_merge(a, b) == expected
True
>>> dict_merge(a, None) == a
True
>>> dict_merge(None, b) == b
True
>>> dict_merge(None, None) == dict()
True
Arguments:
a (dict): The dictionary to merge ``b`` into
b (dict): The dictionary to merge into ``a``
Returns:
dict: ``b`` deep-merged into ``a``
"""
from copy import deepcopy
if a is None:
a = dict()
if b is None:
b = dict()
if not isinstance(b, dict):
return b
result = deepcopy(a)
for k, v in b.items():
if k in result and isinstance(result[k], dict):
result[k] = dict_merge(result[k], v)
else:
result[k] = deepcopy(v)
return result
def dict_sanitize(a, b):
"""
Recursively deep-sanitizes ``a`` based on ``b``, removing all keys (and
associated values) from ``a`` that do not appear in ``b``.
Example::
>>> a = dict(foo="foo", bar="bar", fnord=dict(a=1, b=2, l=["some", "list"]))
>>> b = dict(foo=None, fnord=dict(a=None, b=None))
>>> expected = dict(foo="foo", fnord=dict(a=1, b=2))
>>> dict_sanitize(a, b) == expected
True
>>> dict_clean(a, b) == expected
True
Arguments:
a (dict): The dictionary to clean against ``b``.
b (dict): The dictionary containing the key structure to clean from ``a``.
Results:
dict: A new dict based on ``a`` with all keys (and corresponding values) found in ``b`` removed.
"""
from copy import deepcopy
if not isinstance(b, dict):
return a
result = deepcopy(a)
for k, v in a.items():
if not k in b:
del result[k]
elif isinstance(v, dict):
result[k] = dict_sanitize(v, b[k])
else:
result[k] = deepcopy(v)
return result
dict_clean = deprecated("dict_clean has been renamed to dict_sanitize",
includedoc="Replaced by :func:`dict_sanitize`")(dict_sanitize)
def dict_minimal_mergediff(source, target):
"""
Recursively calculates the minimal dict that would be needed to be deep merged with
a in order to produce the same result as deep merging a and b.
Example::
>>> a = dict(foo=dict(a=1, b=2), bar=dict(c=3, d=4))
>>> b = dict(bar=dict(c=3, d=5), fnord=None)
>>> c = dict_minimal_mergediff(a, b)
>>> c == dict(bar=dict(d=5), fnord=None)
True
>>> dict_merge(a, c) == dict_merge(a, b)
True
Arguments:
source (dict): Source dictionary
target (dict): Dictionary to compare to source dictionary and derive diff for
Returns:
dict: The minimal dictionary to deep merge on ``source`` to get the same result
as deep merging ``target`` on ``source``.
"""
if not isinstance(source, dict) or not isinstance(target, dict):
raise ValueError("source and target must be dictionaries")
if source == target:
# shortcut: if both are equal, we return an empty dict as result
return dict()
from copy import deepcopy
all_keys = set(source.keys() + target.keys())
result = dict()
for k in all_keys:
if k not in target:
# key not contained in b => not contained in result
continue
if k in source:
# key is present in both dicts, we have to take a look at the value
value_source = source[k]
value_target = target[k]
if value_source != value_target:
# we only need to look further if the values are not equal
if isinstance(value_source, dict) and isinstance(value_target, dict):
# both are dicts => deeper down it goes into the rabbit hole
result[k] = dict_minimal_mergediff(value_source, value_target)
else:
# new b wins over old a
result[k] = deepcopy(value_target)
else:
# key is new, add it
result[k] = deepcopy(target[k])
return result
def dict_contains_keys(keys, dictionary):
"""
Recursively deep-checks if ``dictionary`` contains all keys found in ``keys``.
Example::
>>> positive = dict(foo="some_other_bar", fnord=dict(b=100))
>>> negative = dict(foo="some_other_bar", fnord=dict(b=100, d=20))
>>> dictionary = dict(foo="bar", fnord=dict(a=1, b=2, c=3))
>>> dict_contains_keys(positive, dictionary)
True
>>> dict_contains_keys(negative, dictionary)
False
Arguments:
a (dict): The dictionary to check for the keys from ``b``.
b (dict): The dictionary whose keys to check ``a`` for.
Returns:
boolean: True if all keys found in ``b`` are also present in ``a``, False otherwise.
"""
if not isinstance(keys, dict) or not isinstance(dictionary, dict):
return False
for k, v in keys.items():
if not k in dictionary:
return False
elif isinstance(v, dict):
if not dict_contains_keys(v, dictionary[k]):
return False
return True
class fallback_dict(dict):
def __init__(self, custom, *fallbacks):
self.custom = custom
self.fallbacks = fallbacks
def __getitem__(self, item):
for dictionary in self._all():
if item in dictionary:
return dictionary[item]
raise KeyError()
def __setitem__(self, key, value):
self.custom[key] = value
def __delitem__(self, key):
for dictionary in self._all():
if key in dictionary:
del dictionary[key]
def keys(self):
result = set()
for dictionary in self._all():
			result.update(dictionary.keys())
return result
def _all(self):
return [self.custom] + list(self.fallbacks)
def dict_filter(dictionary, filter_function):
"""
Filters a dictionary with the provided filter_function
Example::
>>> data = dict(key1="value1", key2="value2", other_key="other_value", foo="bar", bar="foo")
>>> dict_filter(data, lambda k, v: k.startswith("key")) == dict(key1="value1", key2="value2")
True
>>> dict_filter(data, lambda k, v: v.startswith("value")) == dict(key1="value1", key2="value2")
True
>>> dict_filter(data, lambda k, v: k == "foo" or v == "foo") == dict(foo="bar", bar="foo")
True
>>> dict_filter(data, lambda k, v: False) == dict()
True
>>> dict_filter(data, lambda k, v: True) == data
True
>>> dict_filter(None, lambda k, v: True)
Traceback (most recent call last):
...
AssertionError
>>> dict_filter(data, None)
Traceback (most recent call last):
...
AssertionError
Arguments:
dictionary (dict): The dictionary to filter
filter_function (callable): The filter function to apply, called with key and
value of an entry in the dictionary, must return ``True`` for values to
keep and ``False`` for values to strip
Returns:
dict: A shallow copy of the provided dictionary, stripped of the key-value-pairs
for which the ``filter_function`` returned ``False``
"""
assert isinstance(dictionary, dict)
assert callable(filter_function)
return dict((k, v) for k, v in dictionary.items() if filter_function(k, v))
def parsePropertiesFile(filePath):
"""
Parses a .properties file to a dictonary object
:param filePath: complete path to the file
:return:
"""
separator = "="
result = {}
from os.path import isfile
if not isfile(filePath):
return
with open(filePath) as f:
for line in f:
if separator in line:
# Find the name and value by splitting the string
name, value = line.split(separator, 1)
# strip() removes white space from the ends of strings
result[name.strip()] = value.strip()
return result
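# A small usage sketch (path and keys are hypothetical): for a file containing
# the lines "host=127.0.0.1" and "port=8080",
#
#   parsePropertiesFile("/etc/example/app.properties")
#
# returns {"host": "127.0.0.1", "port": "8080"}. Values are always strings,
# lines without "=" are skipped, and a missing file yields None.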
class Object(object):
pass
def interface_addresses(family=None):
"""
Retrieves all of the host's network interface addresses.
"""
import netifaces
if not family:
family = netifaces.AF_INET
for interface in netifaces.interfaces():
try:
ifaddresses = netifaces.ifaddresses(interface)
except:
continue
if family in ifaddresses:
for ifaddress in ifaddresses[family]:
if not ifaddress["addr"].startswith("169.254."):
yield ifaddress["addr"]
def address_for_client(host, port):
"""
Determines the address of the network interface on this host needed to connect to the indicated client host and port.
"""
import socket
for address in interface_addresses():
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((address, 0))
sock.connect((host, port))
return address
except:
continue
@contextlib.contextmanager
def atomic_write(filename, mode="w+b", prefix="tmp", suffix="", permissions=0o644, max_permissions=0o777):
if os.path.exists(filename):
permissions |= os.stat(filename).st_mode
permissions &= max_permissions
temp_config = tempfile.NamedTemporaryFile(mode=mode, prefix=prefix, suffix=suffix, delete=False)
try:
yield temp_config
finally:
temp_config.close()
os.chmod(temp_config.name, permissions)
shutil.move(temp_config.name, filename)
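# Usage sketch (target path is hypothetical): data is written to a temporary
# file which is chmod'ed and then moved over the destination when the block
# ends; the final move is what makes the replacement atomic:
#
#   with atomic_write("/tmp/example.yaml", mode="wb") as f:
#       f.write(b"key: value\n")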
@contextlib.contextmanager
def tempdir(ignore_errors=False, onerror=None, **kwargs):
import tempfile
import shutil
dirpath = tempfile.mkdtemp(**kwargs)
try:
yield dirpath
finally:
shutil.rmtree(dirpath, ignore_errors=ignore_errors, onerror=onerror)
@contextlib.contextmanager
def temppath(prefix=None, suffix=""):
import tempfile
temp = tempfile.NamedTemporaryFile(prefix=prefix if prefix is not None else tempfile.template,
suffix=suffix,
delete=False)
try:
temp.close()
yield temp.name
finally:
os.remove(temp.name)
def bom_aware_open(filename, encoding="ascii", mode="r", **kwargs):
import codecs
codec = codecs.lookup(encoding)
encoding = codec.name
if kwargs is None:
kwargs = dict()
potential_bom_attribute = "BOM_" + codec.name.replace("utf-", "utf").upper()
if "r" in mode and hasattr(codecs, potential_bom_attribute):
# these encodings might have a BOM, so let's see if there is one
bom = getattr(codecs, potential_bom_attribute)
with open(filename, "rb") as f:
header = f.read(4)
if header.startswith(bom):
encoding += "-sig"
return codecs.open(filename, encoding=encoding, mode=mode, **kwargs)
def is_hidden_path(path):
if path is None:
# we define a None path as not hidden here
return False
filename = os.path.basename(path)
if filename.startswith("."):
# filenames starting with a . are hidden
return True
if sys.platform == "win32":
# if we are running on windows we also try to read the hidden file
# attribute via the windows api
try:
import ctypes
attrs = ctypes.windll.kernel32.GetFileAttributesW(unicode(path))
assert attrs != -1 # INVALID_FILE_ATTRIBUTES == -1
return bool(attrs & 2) # FILE_ATTRIBUTE_HIDDEN == 2
except (AttributeError, AssertionError):
pass
# if we reach that point, the path is not hidden
return False
class RepeatedTimer(threading.Thread):
"""
This class represents an action that should be run repeatedly in an interval. It is similar to python's
own :class:`threading.Timer` class, but instead of only running once the ``function`` will be run again and again,
sleeping the stated ``interval`` in between.
RepeatedTimers are started, as with threads, by calling their ``start()`` method. The timer can be stopped (in
between runs) by calling the :func:`cancel` method. The interval the time waited before execution of a loop may
not be exactly the same as the interval specified by the user.
For example:
.. code-block:: python
def hello():
print("Hello World!")
t = RepeatedTimer(1.0, hello)
t.start() # prints "Hello World!" every second
Another example with dynamic interval and loop condition:
.. code-block:: python
count = 0
maximum = 5
factor = 1
def interval():
global count
global factor
return count * factor
def condition():
global count
global maximum
return count <= maximum
def hello():
print("Hello World!")
global count
count += 1
t = RepeatedTimer(interval, hello, run_first=True, condition=condition)
t.start() # prints "Hello World!" 5 times, printing the first one
# directly, then waiting 1, 2, 3, 4s in between (adaptive interval)
Arguments:
interval (float or callable): The interval between each ``function`` call, in seconds. Can also be a callable
returning the interval to use, in case the interval is not static.
function (callable): The function to call.
args (list or tuple): The arguments for the ``function`` call. Defaults to an empty list.
kwargs (dict): The keyword arguments for the ``function`` call. Defaults to an empty dict.
run_first (boolean): If set to True, the function will be run for the first time *before* the first wait period.
If set to False (the default), the function will be run for the first time *after* the first wait period.
condition (callable): Condition that needs to be True for loop to continue. Defaults to ``lambda: True``.
on_condition_false (callable): Callback to call when the timer finishes due to condition becoming false. Will
be called before the ``on_finish`` callback.
on_cancelled (callable): Callback to call when the timer finishes due to being cancelled. Will be called
before the ``on_finish`` callback.
on_finish (callable): Callback to call when the timer finishes, either due to being cancelled or since
the condition became false.
daemon (bool): daemon flag to set on underlying thread.
"""
def __init__(self, interval, function, args=None, kwargs=None,
run_first=False, condition=None, on_condition_false=None,
on_cancelled=None, on_finish=None, daemon=True):
threading.Thread.__init__(self)
if args is None:
args = []
if kwargs is None:
kwargs = dict()
if condition is None:
condition = lambda: True
if not callable(interval):
self.interval = lambda: interval
else:
self.interval = interval
self.function = function
self.finished = threading.Event()
self.args = args
self.kwargs = kwargs
self.run_first = run_first
self.condition = condition
self.on_condition_false = on_condition_false
self.on_cancelled = on_cancelled
self.on_finish = on_finish
self.daemon = daemon
def cancel(self):
self._finish(self.on_cancelled)
def run(self):
while self.condition():
if self.run_first:
# if we are to run the function BEFORE waiting for the first time
self.function(*self.args, **self.kwargs)
# make sure our condition is still met before running into the downtime
if not self.condition():
break
# wait, but break if we are cancelled
self.finished.wait(self.interval())
if self.finished.is_set():
return
if not self.run_first:
# if we are to run the function AFTER waiting for the first time
self.function(*self.args, **self.kwargs)
# we'll only get here if the condition was false
self._finish(self.on_condition_false)
def _finish(self, *callbacks):
self.finished.set()
for callback in callbacks:
if not callable(callback):
continue
callback()
if callable(self.on_finish):
self.on_finish()
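# CountedEvent (below) wraps threading.Event with a counter: set() increments
# it, clear() decrements it (or resets it to zero with completely=True), and
# the underlying event is set exactly while the counter is positive; an
# optional maximum caps the counter and blocked() reports whether it is zero.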
class CountedEvent(object):
def __init__(self, value=0, maximum=None, **kwargs):
self._counter = 0
self._max = kwargs.get("max", maximum)
self._mutex = threading.Lock()
self._event = threading.Event()
self._internal_set(value)
def set(self):
with self._mutex:
self._internal_set(self._counter + 1)
def clear(self, completely=False):
with self._mutex:
if completely:
self._internal_set(0)
else:
self._internal_set(self._counter - 1)
def wait(self, timeout=None):
self._event.wait(timeout)
def blocked(self):
with self._mutex:
return self._counter == 0
def _internal_set(self, value):
self._counter = value
if self._counter <= 0:
self._counter = 0
self._event.clear()
else:
if self._max is not None and self._counter > self._max:
self._counter = self._max
self._event.set()
class InvariantContainer(object):
def __init__(self, initial_data=None, guarantee_invariant=None):
from collections import Iterable
from threading import RLock
if guarantee_invariant is None:
guarantee_invariant = lambda data: data
self._data = []
self._mutex = RLock()
self._invariant = guarantee_invariant
if initial_data is not None and isinstance(initial_data, Iterable):
for item in initial_data:
self.append(item)
def append(self, item):
with self._mutex:
self._data.append(item)
self._data = self._invariant(self._data)
def remove(self, item):
with self._mutex:
self._data.remove(item)
self._data = self._invariant(self._data)
def __len__(self):
return len(self._data)
def __iter__(self):
return self._data.__iter__()
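# TypedQueue (below) extends Queue.Queue with an optional per-item type tag:
# put(item, item_type=...) raises TypeAlreadyInQueue if an item of the same
# type is still pending, and get() releases the tag again - a simple way to
# de-duplicate queued work of the same kind.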
class TypedQueue(queue.Queue):
def __init__(self, maxsize=0):
queue.Queue.__init__(self, maxsize=maxsize)
self._lookup = set()
def put(self, item, item_type=None, *args, **kwargs):
queue.Queue.put(self, (item, item_type), *args, **kwargs)
def get(self, *args, **kwargs):
item, _ = queue.Queue.get(self, *args, **kwargs)
return item
def _put(self, item):
_, item_type = item
if item_type is not None:
if item_type in self._lookup:
raise TypeAlreadyInQueue(item_type, "Type {} is already in queue".format(item_type))
else:
self._lookup.add(item_type)
queue.Queue._put(self, item)
def _get(self):
item = queue.Queue._get(self)
_, item_type = item
if item_type is not None:
self._lookup.discard(item_type)
return item
class TypeAlreadyInQueue(Exception):
def __init__(self, t, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
self.type = t
| agpl-3.0 | -965,555,217,127,192,000 | 30.021661 | 158 | 0.678663 | false |
dongsupark/seafile-fuse-client | seafilefuse.py | 1 | 14894 | #!/usr/bin/env python
"""
FUSE-based client for Seafile
- written by Dongsu Park <[email protected]>
(inspired by copy-fuse <https://github.com/copy-app/copy-fuse>)
A simple client for seafile.com, implemented via FUSE.
This tool allows a Linux/MacOSX client to mount a seafile cloud drive on a
local filesystem.
Quickstart usage:
$ mkdir -p /mnt/seafile
$ ./seafilefuse.py "http://127.0.0.1:8000" [email protected] "testtest" /mnt/seafile
(where server URL is "http://127.0.0.1:8000", username is [email protected],
and password is "testtest".)
To unmount it:
$ fusermount -u /mnt/seafile
"""
from errno import ENOENT, EIO, EFAULT
from stat import S_IFDIR, S_IFREG
from sys import argv, exit, stderr
import os
import argparse
import tempfile
import time
import hashlib
from fuse import FUSE, FuseOSError, Operations, LoggingMixIn
from seafileapi.client import SeafileApiClient
from seafileapi.exceptions import ClientHttpError, DoesNotExist
from seafileapi.files import SeafDir, SeafFile
from seafileapi.repo import Repo
from seafileapi.repos import Repos
# global configurable variables to be connected to a Seafile server.
sf_server_url="http://127.0.0.1:8000"
sf_username="[email protected]"
sf_password="testtest"
sf_mount_point="/mnt/seafile"
repo_id_len=36
cache_ttl=10
def seafile_read_envs():
global sf_server_url, sf_username, sf_password, sf_mount_point
if os.environ.get('SEAFILE_TEST_SERVER_ADDRESS') is not None:
sf_server_url = os.environ['SEAFILE_TEST_SERVER_ADDRESS']
if os.environ.get('SEAFILE_TEST_USERNAME') is not None:
sf_username = os.environ['SEAFILE_TEST_USERNAME']
if os.environ.get('SEAFILE_TEST_PASSWORD') is not None:
sf_password = os.environ['SEAFILE_TEST_PASSWORD']
if os.environ.get('SEAFILE_TEST_MOUNT_POINT') is not None:
sf_mount_point = os.environ['SEAFILE_TEST_MOUNT_POINT']
def seafile_list_repos(client):
global repo_id_len
repos = client.repos.list_repos()
for repo in repos:
assert len(repo.id) == repo_id_len
return repos
def seafile_find_repo(repos, repoid=None):
if repoid is None:
# just return the first repo if no repo is matched
return repos[0]
for tmprepo in repos:
if tmprepo.id == repoid:
return tmprepo
# not found, raise exception
raise FuseOSError(ENOENT)
class SeafileCache:
"""class for handling caches of file attributes as well as expiration time.
SeafileCache instances must be initialized by SeafileFUSE.
"""
def __init__(self, repo):
self.attrcache = {}
self.expirecache = {}
self.currepo = repo
def __str__(self, msg):
print 'SeafileCache: %s' % msg
def list_objects(self, path, ttl=cache_ttl):
global cache_ttl
# check expiration time cache
if path in self.expirecache:
if self.expirecache[path] >= time.time():
return self.attrcache[path]
self.attrcache[path] = {}
try:
parentdir = self.currepo.get_dir(path)
entries = parentdir.ls(force_refresh=True)
except ClientHttpError as err:
print "list_objects: err: " + str(err)
return self.attrcache[path]
except IOError as err:
print "list_objects: err: " + str(err)
return self.attrcache[path]
for entry in entries:
#name = os.path.basename(entry.path).encode('utf8')
name = os.path.basename(entry.path)
self.add_attrcache(path, name, entry.isdir, entry.size)
# update expiration time cache
self.expirecache[path] = time.time() + ttl
return self.attrcache[path]
def add_attrcache(self, pdirpath, filename, isdir=False, size=0):
"""adds a new cache entry to self.attrcache, no matter if the entry for
the path already exists.
"""
if isdir:
ftype = 'dir'
else:
ftype = 'file'
self.attrcache[pdirpath][filename] = \
{'name': filename, 'type': ftype, 'size': size, 'ctime': time.time(), 'mtime': time.time()}
def update_attrcache(self, pdirpath, filename, isdir=False, size=0):
"""update an existing cache entry in self.attrcache, only if it
already exists for the path as a key.
"""
if pdirpath in self.attrcache:
self.add_attrcache(pdirpath, filename, isdir, size)
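# Illustrative sketch (not part of the original module) of how SeafileCache is
# driven by the filesystem code below; `repo` is assumed to be a seafileapi
# Repo instance and the path/file names are hypothetical:
#
#   cache = SeafileCache(repo)
#   entries = cache.list_objects('/')   # name -> attrs dict, cached for cache_ttl seconds
#   cache.update_attrcache('/', 'notes.txt', isdir=False, size=0)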
class SeafileFUSE(LoggingMixIn, Operations):
"""Main class of the seafile client filesystem based on FUSE.
On initialization, basic connections are established via SeafileApiClient.
Only one seafile repository is to be selected for further operations.
SeafileCache instance must be initialized from the init method as well.
"""
def __init__(self, server=sf_server_url, username=sf_username, \
password=sf_password, repoid=None, logfile=None):
try:
self.seafileapi_client = SeafileApiClient(server, username, password)
except ClientHttpError as err:
            print 'SeafileFUSE: %s' % str(err)
except DoesNotExist as err:
            print 'SeafileFUSE: %s' % str(err)
self.logfile = logfile
self.fobjdict = {}
self.repos = seafile_list_repos(self.seafileapi_client)
self.currepo = seafile_find_repo(self.repos, repoid)
print "Current repo's ID: " + self.currepo.id
self.seafile_cache = SeafileCache(self.currepo)
def __str__(self, msg):
print 'SeafileFUSE: %s' % msg
def file_close(self, path):
if path in self.fobjdict:
if self.fobjdict[path]['modified'] == True:
self.file_upload(path)
self.fobjdict[path]['object'].close()
del self.fobjdict[path]
def file_get(self, path, download=True):
# print "file_get: " + path
if path in self.fobjdict:
return self.fobjdict[path]
if download == True:
sfileobj = self.currepo.get_file(path)
fcontent = sfileobj.get_content()
else:
fcontent = ''
f = tempfile.NamedTemporaryFile(delete=False)
f.write(fcontent)
self.fobjdict[path] = {'object': f, 'modified': False}
# print "written to tmpfile " + f.name
# print "fcontent: " + fcontent
return self.fobjdict[path]
def file_rename(self, old, new):
if old in self.fobjdict:
self.fobjdict[new] = self.fobjdict[old]
del self.fobjdict[old]
def file_upload(self, path):
if path not in self.fobjdict:
print "file_upload: path(" + path + ") not found in cache"
raise FuseOSError(EIO)
fileobj = self.file_get(path)
if fileobj['modified'] == False:
# print "not doing upload. return true"
return True
fp = fileobj['object']
fp.seek(0)
if path == '/':
pdirpath = '/'
else:
pdirpath = os.path.dirname(path)
targetdir = self.currepo.get_dir(pdirpath)
nfilename = os.path.basename(path)
try:
targetfile = targetdir.upload(fp, nfilename)
except ClientHttpError as err:
print __str__("err: " + str(err))
return 0
except IOError as err:
print __str__("err: " + str(err))
return 0
except DoesNotExist as err:
print __str__("err: " + str(err))
return 0
# print "uploaded " + nfilename
fileobj['modified'] = False
def getattr(self, path, fh=None):
# print "getattr: " + path
if path == '/':
st = dict(st_mode=(S_IFDIR | 0755), st_nlink=2)
st['st_ctime'] = st['st_atime'] = st['st_mtime'] = time.time()
else:
name = str(os.path.basename(path))
objects = self.seafile_cache.list_objects(os.path.dirname(path))
if name not in objects:
raise FuseOSError(ENOENT)
elif objects[name]['type'] == 'file':
st = dict(st_mode=(S_IFREG | 0644), st_size=int(objects[name]['size']))
else:
st = dict(st_mode=(S_IFDIR | 0755), st_nlink=2)
st['st_ctime'] = st['st_atime'] = objects[name]['ctime']
st['st_mtime'] = objects[name]['mtime']
st['st_uid'] = os.getuid()
st['st_gid'] = os.getgid()
return st
def open(self, path, flags):
# print "open: " + path
self.file_get(path)
return 0
def flush(self, path, fh):
# print "flush: " + path
try:
if path in self.fobjdict:
if self.fobjdict[path]['modified'] == True:
self.file_upload(path)
except DoesNotExist as err:
print __str__("flush: err: " + str(err))
def fsync(self, path, datasync, fh):
# print "fsync: " + path
try:
if path in self.fobjdict:
if self.fobjdict[path]['modified'] == True:
self.file_upload(path)
except DoesNotExist as err:
print __str__("fsync: err: " + str(err))
def read(self, path, size, offset, fh):
f = self.file_get(path)['object']
f.seek(offset)
return f.read(size)
def write(self, path, data, offset, fh):
# print "write: " + path
fileobj = self.file_get(path)
f = fileobj['object']
f.seek(offset)
f.write(data)
fileobj['modified'] = True
return len(data)
def readdir(self, path, fh):
# print "readdir: " + path
objsdict = self.seafile_cache.list_objects(path);
outlist = ['.', '..']
for obj in objsdict:
outlist.append(obj)
return outlist
def rename(self, oldname, newname):
# print "rename: " + oldname + " to " + newname
self.file_rename(oldname, newname)
ofilename = os.path.basename(oldname)
podirname = os.path.dirname(oldname)
pndirname = os.path.dirname(newname)
targetfile = self.currepo.get_file(oldname)
if podirname != pndirname:
# use moveTo operation for moving it to a different directory
targetfile.moveTo(pndirname, dst_repo=None)
tmpname = os.path.join(pndirname, ofilename)
targetfile = self.currepo.get_file(tmpname)
# simply call a rename method
targetfile.rename(newname.strip("/"))
return 0
def create(self, path, mode):
# print "create: " + path
nfilename = os.path.basename(path)
pdirpath = os.path.dirname(path)
parentdir = self.currepo.get_dir(pdirpath)
parentdir.ls()
self.seafile_cache.update_attrcache(pdirpath, nfilename, isdir=False)
self.file_get(path, download=False)
self.file_upload(path)
return 0
def unlink(self, path):
# print "unlink: " + path
if path == '/':
raise FuseOSError(EFAULT)
targetfile = self.currepo.get_file(path)
targetfile.delete()
tfilename = os.path.basename(path)
pdirpath = os.path.dirname(path)
self.seafile_cache.update_attrcache(pdirpath, tfilename, isdir=False)
return 0
def mkdir(self, path, mode):
# print "mkdir: " + path
ndirname = os.path.basename(path)
pdirpath = os.path.dirname(path)
parentdir = self.currepo.get_dir(pdirpath)
parentdir.ls()
self.seafile_cache.update_attrcache(pdirpath, ndirname, isdir=True)
newdir = parentdir.mkdir(ndirname)
return 0
def rmdir(self, path):
# print "rmdir: " + path
if path == '/':
raise FuseOSError(EFAULT)
targetdir = self.currepo.get_dir(path)
targetdir.delete()
tdirname = os.path.basename(path)
pdirpath = os.path.dirname(path)
self.seafile_cache.update_attrcache(pdirpath, tdirname, isdir=True)
return 0
def release(self, path, fh):
# print "release: " + path
try:
if self.fobjdict[path]['modified'] == True:
self.file_close(path)
except DoesNotExist as err:
print __str__("release, err: " + str(err))
def truncate(self, path, length, fh=None):
# print "truncate: " + path
f = self.file_get(path)['object']
f.truncate(length)
# Disable unused operations:
access = None
chmod = None
chown = None
getxattr = None
listxattr = None
opendir = None
releasedir = None
statfs = None
def main():
parser = argparse.ArgumentParser(
description='Fuse filesystem for seafile clients')
parser.add_argument(
'-d', '--debug', default=False, action='store_true',
help='turn on debug output (implies -f)')
parser.add_argument(
'-f', '--foreground', default=False, action='store_true',
help='run in foreground')
repoid = None
parser.add_argument(
'-r', '--repoid', type=str,
help='specify ID of the remote repository (if not set, auto-choose the 1st repo)')
parser.add_argument(
'-o', '--options', help='add extra fuse options (see "man fuse")')
parser.add_argument(
'server_url', metavar='SERVERURL', help='server_url')
parser.add_argument(
'username', metavar='EMAIL', help='username/email')
parser.add_argument(
'password', metavar='PASS', help='password')
parser.add_argument(
'mount_point', metavar='MNTDIR', help='directory to mount filesystem at')
args = parser.parse_args(argv[1:])
seafile_read_envs()
u_server_url = args.__dict__.pop('server_url')
if u_server_url is not None:
sf_server_url = u_server_url
u_username = args.__dict__.pop('username')
if u_username is not None:
sf_username = u_username
u_password = args.__dict__.pop('password')
if u_password is not None:
sf_password = u_password
u_mount_point = args.__dict__.pop('mount_point')
if u_mount_point is not None:
sf_mount_point = u_mount_point
u_repoid = args.__dict__.pop('repoid')
# parse options
options_str = args.__dict__.pop('options')
options = dict([(kv.split('=', 1)+[True])[:2] for kv in (options_str and options_str.split(',')) or []])
fuse_args = args.__dict__.copy()
fuse_args.update(options)
logfile = None
if fuse_args.get('debug', False) == True:
# send to stderr same as where fuse lib sends debug messages
logfile = stderr
fuse = FUSE(SeafileFUSE(server=sf_server_url, username=sf_username, \
password=sf_password, repoid=u_repoid, logfile=logfile), \
sf_mount_point, **fuse_args)
if __name__ == "__main__":
main()
| apache-2.0 | -2,199,827,109,060,702,500 | 30.555085 | 108 | 0.594535 | false |
sneeu/stationary | stationary.py | 1 | 6358 | import config
import shutil
import jinja2
import markdown2 as markdown
import os.path
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name, TextLexer
import re
import yaml
POST_HEADER_SEP_RE = re.compile('^---$', re.MULTILINE)
DATE_FORMAT = '%Y-%m-%d %H:%M'
SOURCECODE_RE = re.compile(
r'\[sourcecode:(.+?)\](.+?)\[/sourcecode\]', re.S)
def pygments_preprocess(lines):
formatter = HtmlFormatter(noclasses=False)
def repl(m):
try:
lexer = get_lexer_by_name(m.group(1))
except ValueError:
lexer = TextLexer()
code = highlight(m.group(2), lexer, formatter)
code = code.replace('\n\n', '\n \n').strip().replace('\n', '<br />')
return '\n\n<div class="code">%s</div>\n\n' % code
return SOURCECODE_RE.sub(repl, lines)
class Blog(object):
def __init__(self, title, posts):
self.title = title
self.posts = posts
def __str__(self):
return config.TITLE
@property
def path(self):
return '%s%sindex.html' % (
config.OUT_PATH,
config.BLOG_URL, )
@property
def url(self):
return '/%s' % (config.BLOG_URL, )
class Post(object):
def __init__(self, pub_date, title, slug, content):
self.pub_date = pub_date
self.title = title
self.slug = slug
self.content = content
def __cmp__(self, other):
return cmp(self.pub_date, other.pub_date)
def __str__(self):
return self.title
@property
def path(self):
return '%s%s%s/index.html' % (
config.OUT_PATH,
config.BLOG_URL,
config.POST_URL.format(post=self), )
@property
def url(self):
return '/%s%s/' % (
config.BLOG_URL,
config.POST_URL.format(post=self), )
class Page(object):
def __init__(self, path, content, meta_data=None):
self._path = path
self.content = content
self.meta_data = meta_data
@property
def path(self):
return '%s%s/index.html' % (
config.OUT_PATH,
self._path, )
@property
def url(self):
return '/%s/' % (self._path, )
def post_from_filename(filename):
with open(filename) as post_file:
post_data = post_file.read()
headers, content = re.split(POST_HEADER_SEP_RE, post_data, 1)
headers = yaml.load(headers)
content = markdown.markdown(pygments_preprocess(content)).strip()
pub_date = headers['date']
title = headers['title']
slug, __ = os.path.splitext(os.path.basename(filename))
match = re.match('\d{4}-\d{2}-\d{2}-(.+)', slug)
if match:
slug = match.group(1)
return Post(pub_date, title, slug, content)
def blog_from_path(title, path):
posts = []
posts_path = os.path.join(path, 'posts/')
for filename in os.listdir(posts_path):
posts.append(post_from_filename(os.path.join(posts_path, filename)))
return Blog(title, list(reversed(sorted(posts))))
def page_from_filename(filename, base_path):
with open(filename) as page_file:
page_data = page_file.read()
header, content = re.split(POST_HEADER_SEP_RE, page_data, 1)
meta_data = yaml.load(header)
content = markdown.markdown(pygments_preprocess(content)).strip()
slug, __ = os.path.splitext(os.path.relpath(filename, base_path))
match = re.match('\d{4}-\d{2}-\d{2}-(.+)', slug)
if match:
slug = match.group(1)
return Page(slug, content, meta_data=meta_data)
def pages_from_path(path):
pages = []
for dirname, folders, filenames in os.walk(path):
for filename in filenames:
page_path = os.path.join(dirname, filename)
pages.append(page_from_filename(page_path, path))
return pages
def build():
blog = blog_from_path(config.TITLE, config.IN_PATH)
environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(
os.path.join(config.IN_PATH, 'templates/')))
# Copy static files
shutil.copytree(
os.path.join(config.IN_PATH, 'static/'),
config.OUT_PATH)
# Render static pages
pages = pages_from_path(os.path.join(config.IN_PATH, 'pages/'))
for page in pages:
page_template_name = page.meta_data.get('template', 'page.html')
page_template = environment.get_template(page_template_name)
if not os.path.isdir(os.path.dirname(page.path)):
os.makedirs(os.path.dirname(page.path))
with open(page.path, 'w') as out_file:
out_file.write(page_template.render(page=page))
# Render the base blog page
blog_template = environment.get_template('index.html')
if not os.path.isdir(os.path.dirname(blog.path)):
os.makedirs(os.path.dirname(blog.path))
with open(blog.path, 'w') as out_file:
out_file.write(blog_template.render(blog=blog))
# Render post pages
post_template = environment.get_template('post.html')
for post in blog.posts:
if not os.path.isdir(os.path.dirname(post.path)):
os.makedirs(os.path.dirname(post.path))
with open(post.path, 'w') as out_file:
out_file.write(post_template.render(blog=blog, post=post))
def clean():
try:
shutil.rmtree(config.OUT_PATH)
except OSError:
print '%s could not be deleted.' % config.OUT_PATH
def serve():
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
MIMETYPES = {
'.css': 'text/css',
'.html': 'text/html',
'.js': 'application/javascript',
}
class Handler(BaseHTTPRequestHandler):
def do_GET(self):
path = self.path[1:]
path = os.path.join(config.OUT_PATH, path)
if self.path[-1] == '/':
path = os.path.join(path, 'index.html')
f = open(path)
self.send_response(200)
__, ext = os.path.splitext(self.path)
mimetype = MIMETYPES.get(ext, 'text/html')
self.send_header('Content-type', mimetype)
self.end_headers()
self.wfile.write(f.read())
f.close()
return
PORT = 8000
server = HTTPServer(('', PORT), Handler)
print "Serving at port", PORT
server.serve_forever()
| bsd-3-clause | 9,054,622,329,926,579,000 | 27.132743 | 81 | 0.596099 | false |
naterh/cloud-init-rax-pkg | cloudinit/sources/DataSourceConfigDrive.py | 1 | 11008 | # vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <[email protected]>
# Author: Joshua Harlow <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from cloudinit import log as logging
from cloudinit import sources
from cloudinit import util
from cloudinit.sources.helpers import openstack
LOG = logging.getLogger(__name__)
# Various defaults/constants...
DEFAULT_IID = "iid-dsconfigdrive"
DEFAULT_MODE = 'pass'
DEFAULT_METADATA = {
"instance-id": DEFAULT_IID,
}
VALID_DSMODES = ("local", "net", "pass", "disabled")
FS_TYPES = ('vfat', 'iso9660')
LABEL_TYPES = ('config-2',)
POSSIBLE_MOUNTS = ('sr', 'cd')
OPTICAL_DEVICES = tuple(
('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS for i in range(0, 2)))
class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths)
self.source = None
self.dsmode = 'local'
self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
self.version = None
self.ec2_metadata = None
self.files = {}
def __str__(self):
root = sources.DataSource.__str__(self)
mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version)
mstr += "[source=%s]" % (self.source)
return mstr
def get_data(self):
found = None
md = {}
results = {}
if os.path.isdir(self.seed_dir):
try:
results = read_config_drive(self.seed_dir)
found = self.seed_dir
except openstack.NonReadable:
util.logexc(LOG, "Failed reading config drive from %s",
self.seed_dir)
if not found:
for dev in find_candidate_devs():
try:
# Set mtype if freebsd and turn off sync
if dev.startswith("/dev/cd"):
mtype = "cd9660"
sync = False
else:
mtype = None
sync = True
results = util.mount_cb(dev, read_config_drive,
mtype=mtype, sync=sync)
found = dev
except openstack.NonReadable:
pass
except util.MountFailedError:
pass
except openstack.BrokenMetadata:
util.logexc(LOG, "Broken config drive: %s", dev)
if found:
break
if not found:
return False
md = results.get('metadata', {})
md = util.mergemanydict([md, DEFAULT_METADATA])
user_dsmode = results.get('dsmode', None)
if user_dsmode not in VALID_DSMODES + (None,):
LOG.warn("User specified invalid mode: %s", user_dsmode)
user_dsmode = None
dsmode = get_ds_mode(cfgdrv_ver=results['version'],
ds_cfg=self.ds_cfg.get('dsmode'),
user=user_dsmode)
if dsmode == "disabled":
# most likely user specified
return False
# TODO(smoser): fix this, its dirty.
# we want to do some things (writing files and network config)
# only on first boot, and even then, we want to do so in the
# local datasource (so they happen earlier) even if the configured
# dsmode is 'net' or 'pass'. To do this, we check the previous
# instance-id
prev_iid = get_previous_iid(self.paths)
cur_iid = md['instance-id']
if prev_iid != cur_iid and self.dsmode == "local":
on_first_boot(results, distro=self.distro)
# dsmode != self.dsmode here if:
# * dsmode = "pass", pass means it should only copy files and then
# pass to another datasource
# * dsmode = "net" and self.dsmode = "local"
# so that user boothooks would be applied with network, the
# local datasource just gets out of the way, and lets the net claim
if dsmode != self.dsmode:
LOG.debug("%s: not claiming datasource, dsmode=%s", self, dsmode)
return False
self.source = found
self.metadata = md
self.ec2_metadata = results.get('ec2-metadata')
self.userdata_raw = results.get('userdata')
self.version = results['version']
self.files.update(results.get('files', {}))
vd = results.get('vendordata')
if isinstance(vd, dict):
if 'cloud-init' in vd:
self.vendordata_raw = vd['cloud-init']
else:
self.vendordata_pure = vd
try:
self.vendordata_raw = openstack.convert_vendordata_json(vd)
except ValueError as e:
LOG.warn("Invalid content in vendor-data: %s", e)
self.vendordata_raw = None
return True
class DataSourceConfigDriveNet(DataSourceConfigDrive):
def __init__(self, sys_cfg, distro, paths):
DataSourceConfigDrive.__init__(self, sys_cfg, distro, paths)
self.dsmode = 'net'
def get_ds_mode(cfgdrv_ver, ds_cfg=None, user=None):
"""Determine what mode should be used.
valid values are 'pass', 'disabled', 'local', 'net'
"""
# user passed data trumps everything
if user is not None:
return user
if ds_cfg is not None:
return ds_cfg
# at config-drive version 1, the default behavior was pass. That
    # meant to not use it as a primary data source, but expect an ec2 metadata
# source. for version 2, we default to 'net', which means
# the DataSourceConfigDriveNet, would be used.
#
# this could change in the future. If there was definitive metadata
    # that indicated presence of an openstack metadata service, then
# we could change to 'pass' by default also. The motivation for that
# would be 'cloud-init query' as the web service could be more dynamic
if cfgdrv_ver == 1:
return "pass"
return "net"
def read_config_drive(source_dir):
reader = openstack.ConfigDriveReader(source_dir)
finders = [
(reader.read_v2, [], {}),
(reader.read_v1, [], {}),
]
excps = []
for (functor, args, kwargs) in finders:
try:
return functor(*args, **kwargs)
except openstack.NonReadable as e:
excps.append(e)
raise excps[-1]
def get_previous_iid(paths):
# interestingly, for this purpose the "previous" instance-id is the current
# instance-id. cloud-init hasn't moved them over yet as this datasource
# hasn't declared itself found.
fname = os.path.join(paths.get_cpath('data'), 'instance-id')
try:
return util.load_file(fname).rstrip("\n")
except IOError:
return None
def on_first_boot(data, distro=None):
"""Performs any first-boot actions using data read from a config-drive."""
if not isinstance(data, dict):
raise TypeError("Config-drive data expected to be a dict; not %s"
% (type(data)))
networkapplied = False
jsonnet_conf = data.get('vendordata', {}).get('network_info')
if jsonnet_conf:
try:
LOG.debug("Updating network interfaces from JSON in config drive")
distro_user_config = distro.apply_network_json(jsonnet_conf)
networkapplied = True
except NotImplementedError:
LOG.debug(
"Distro does not implement networking setup via Vendor JSON.")
pass
net_conf = data.get("network_config", '')
if networkapplied is False and net_conf and distro:
LOG.debug("Updating network interfaces from config drive")
distro.apply_network(net_conf)
files = data.get('files', {})
if files:
LOG.debug("Writing %s injected files", len(files))
for (filename, content) in files.items():
if not filename.startswith(os.sep):
filename = os.sep + filename
try:
util.write_file(filename, content, mode=0o660)
except IOError:
util.logexc(LOG, "Failed writing file: %s", filename)
def find_candidate_devs(probe_optical=True):
"""Return a list of devices that may contain the config drive.
    The returned list is sorted by search order where the first item
    should be searched first (highest priority).
config drive v1:
Per documentation, this is "associated as the last available disk on the
instance", and should be VFAT.
Currently, we do not restrict search list to "last available disk"
config drive v2:
Disk should be:
        * either vfat or iso9660 formatted
* labeled with 'config-2'
"""
# query optical drive to get it in blkid cache for 2.6 kernels
if probe_optical:
for device in OPTICAL_DEVICES:
try:
util.find_devs_with(path=device)
except util.ProcessExecutionError:
pass
by_fstype = []
for fs_type in FS_TYPES:
by_fstype.extend(util.find_devs_with("TYPE=%s" % (fs_type)))
by_label = []
for label in LABEL_TYPES:
by_label.extend(util.find_devs_with("LABEL=%s" % (label)))
# give preference to "last available disk" (vdb over vda)
# note, this is not a perfect rendition of that.
by_fstype.sort(reverse=True)
by_label.sort(reverse=True)
# combine list of items by putting by-label items first
# followed by fstype items, but with dupes removed
candidates = (by_label + [d for d in by_fstype if d not in by_label])
# We are looking for a block device or partition with necessary label or
# an unpartitioned block device (ex sda, not sda1)
devices = [d for d in candidates
if d in by_label or not util.is_partition(d)]
return devices
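# Illustrative note (not part of the original module): on a guest whose config
# drive is attached as an unpartitioned vfat volume labelled 'config-2', the
# function above would typically return something like ['/dev/vdb']; the device
# name is a hypothetical example, not a guaranteed value.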
# Used to match classes to dependencies
datasources = [
(DataSourceConfigDrive, (sources.DEP_FILESYSTEM, )),
(DataSourceConfigDriveNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
| gpl-3.0 | -6,002,865,558,653,824,000 | 35.450331 | 79 | 0.601017 | false |
D8TM/railtracker | mapfeed/fields.py | 1 | 17005 | # https://github.com/django-nonrel/djangotoolbox/blob/master/djangotoolbox/fields.py
# All fields except for BlobField written by Jonas Haag <[email protected]>
from django.core.exceptions import ValidationError
from django.utils.importlib import import_module
from django.db import models
from django.db.models.fields.subclassing import Creator
from django.db.utils import IntegrityError
from django.db.models.fields.related import add_lazy_relation
__all__ = ('RawField', 'ListField', 'SetField', 'DictField',
'EmbeddedModelField', 'BlobField')
EMPTY_ITER = ()
class _FakeModel(object):
"""
An object of this class can pass itself off as a model instance
when used as an arguments to Field.pre_save method (item_fields
of iterable fields are not actually fields of any model).
"""
def __init__(self, field, value):
setattr(self, field.attname, value)
class RawField(models.Field):
"""
Generic field to store anything your database backend allows you
to. No validation or conversions are done for this field.
"""
def get_internal_type(self):
"""
Returns this field's kind. Nonrel fields are meant to extend
the set of standard fields, so fields subclassing them should
get the same internal type, rather than their own class name.
"""
return 'RawField'
class AbstractIterableField(models.Field):
"""
Abstract field for fields for storing iterable data type like
``list``, ``set`` and ``dict``.
You can pass an instance of a field as the first argument.
If you do, the iterable items will be piped through the passed
field's validation and conversion routines, converting the items
to the appropriate data type.
"""
def __init__(self, item_field=None, *args, **kwargs):
default = kwargs.get(
'default', None if kwargs.get('null') else EMPTY_ITER)
# Ensure a new object is created every time the default is
# accessed.
if default is not None and not callable(default):
kwargs['default'] = lambda: self._type(default)
super(AbstractIterableField, self).__init__(*args, **kwargs)
# Either use the provided item_field or a RawField.
if item_field is None:
item_field = RawField()
elif callable(item_field):
item_field = item_field()
self.item_field = item_field
# We'll be pretending that item_field is a field of a model
# with just one "value" field.
assert not hasattr(self.item_field, 'attname')
self.item_field.set_attributes_from_name('value')
def contribute_to_class(self, cls, name):
self.item_field.model = cls
self.item_field.name = name
super(AbstractIterableField, self).contribute_to_class(cls, name)
# If items' field uses SubfieldBase we also need to.
item_metaclass = getattr(self.item_field, '__metaclass__', None)
if item_metaclass and issubclass(item_metaclass, models.SubfieldBase):
setattr(cls, self.name, Creator(self))
if isinstance(self.item_field, models.ForeignKey) and isinstance(self.item_field.rel.to, basestring):
"""
If rel.to is a string because the actual class is not yet defined, look up the
actual class later. Refer to django.models.fields.related.RelatedField.contribute_to_class.
"""
def _resolve_lookup(_, resolved_model, __):
self.item_field.rel.to = resolved_model
self.item_field.do_related_class(self, cls)
add_lazy_relation(cls, self, self.item_field.rel.to, _resolve_lookup)
def _map(self, function, iterable, *args, **kwargs):
"""
Applies the function to items of the iterable and returns
an iterable of the proper type for the field.
        Overridden by DictField to only apply the function to values.
"""
return self._type(function(element, *args, **kwargs)
for element in iterable)
def to_python(self, value):
"""
Passes value items through item_field's to_python.
"""
if value is None:
return None
return self._map(self.item_field.to_python, value)
def pre_save(self, model_instance, add):
"""
Gets our value from the model_instance and passes its items
through item_field's pre_save (using a fake model instance).
"""
value = getattr(model_instance, self.attname)
if value is None:
return None
return self._map(
lambda item: self.item_field.pre_save(
_FakeModel(self.item_field, item), add),
value)
def get_db_prep_save(self, value, connection):
"""
Applies get_db_prep_save of item_field on value items.
"""
if value is None:
return None
return self._map(self.item_field.get_db_prep_save, value,
connection=connection)
def get_db_prep_lookup(self, lookup_type, value, connection,
prepared=False):
"""
Passes the value through get_db_prep_lookup of item_field.
"""
# TODO/XXX: Remove as_lookup_value() once we have a cleaner
# solution for dot-notation queries.
# See: https://groups.google.com/group/django-non-relational/browse_thread/thread/6056f8384c9caf04/89eeb9fb22ad16f3).
if hasattr(value, 'as_lookup_value'):
value = value.as_lookup_value(self, lookup_type, connection)
return self.item_field.get_db_prep_lookup(
lookup_type, value, connection=connection, prepared=prepared)
def validate(self, values, model_instance):
try:
iter(values)
except TypeError:
raise ValidationError("Value of type %r is not iterable." %
type(values))
def formfield(self, **kwargs):
raise NotImplementedError("No form field implemented for %r." %
type(self))
class ListField(AbstractIterableField):
"""
Field representing a Python ``list``.
If the optional keyword argument `ordering` is given, it must be a
callable that is passed to :meth:`list.sort` as `key` argument. If
`ordering` is given, the items in the list will be sorted before
sending them to the database.
"""
_type = list
def __init__(self, *args, **kwargs):
self.ordering = kwargs.pop('ordering', None)
if self.ordering is not None and not callable(self.ordering):
raise TypeError("'ordering' has to be a callable or None, "
"not of type %r." % type(self.ordering))
super(ListField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return 'ListField'
def pre_save(self, model_instance, add):
value = getattr(model_instance, self.attname)
if value is None:
return None
if value and self.ordering:
value.sort(key=self.ordering)
return super(ListField, self).pre_save(model_instance, add)
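# Illustrative usage sketch (not part of the original module); the model and
# field names are hypothetical:
#
#   class Post(models.Model):
#       tags = ListField(models.CharField(max_length=50),
#                        ordering=lambda tag: tag.lower())
#
# Each item is validated/converted by the CharField, and the list is sorted
# with the given key before it is written to the database.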
class SetField(AbstractIterableField):
"""
Field representing a Python ``set``.
"""
_type = set
def get_internal_type(self):
return 'SetField'
def value_to_string(self, obj):
"""
Custom method for serialization, as JSON doesn't support
serializing sets.
"""
return list(self._get_val_from_obj(obj))
class DictField(AbstractIterableField):
"""
Field representing a Python ``dict``.
Type conversions described in :class:`AbstractIterableField` only
affect values of the dictionary, not keys. Depending on the
back-end, keys that aren't strings might not be allowed.
"""
_type = dict
def get_internal_type(self):
return 'DictField'
def _map(self, function, iterable, *args, **kwargs):
return self._type((key, function(value, *args, **kwargs))
for key, value in iterable.iteritems())
def validate(self, values, model_instance):
if not isinstance(values, dict):
raise ValidationError("Value is of type %r. Should be a dict." %
type(values))
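# Illustrative usage sketch (not part of the original module); names are
# hypothetical:
#
#   class Document(models.Model):
#       counters = DictField(models.IntegerField())
#
# Only the dict values pass through the IntegerField conversions; keys are
# stored as-is (string keys are the portable choice across back-ends).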
class EmbeddedModelField(models.Field):
"""
Field that allows you to embed a model instance.
:param embedded_model: (optional) The model class of instances we
will be embedding; may also be passed as a
string, similar to relation fields
TODO: Make sure to delegate all signals and other field methods to
the embedded instance (not just pre_save, get_db_prep_* and
to_python).
"""
__metaclass__ = models.SubfieldBase
def __init__(self, embedded_model=None, *args, **kwargs):
self.embedded_model = embedded_model
kwargs.setdefault('default', None)
super(EmbeddedModelField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return 'EmbeddedModelField'
def _set_model(self, model):
"""
Resolves embedded model class once the field knows the model it
belongs to.
If the model argument passed to __init__ was a string, we need
to make sure to resolve that string to the corresponding model
class, similar to relation fields.
However, we need to know our own model to generate a valid key
for the embedded model class lookup and EmbeddedModelFields are
not contributed_to_class if used in iterable fields. Thus we
rely on the collection field telling us its model (by setting
our "model" attribute in its contribute_to_class method).
"""
self._model = model
if model is not None and isinstance(self.embedded_model, basestring):
def _resolve_lookup(self_, resolved_model, model):
self.embedded_model = resolved_model
add_lazy_relation(model, self, self.embedded_model, _resolve_lookup)
model = property(lambda self: self._model, _set_model)
def stored_model(self, column_values):
"""
Returns the fixed embedded_model this field was initialized
with (typed embedding) or tries to determine the model from
_module / _model keys stored together with column_values
(untyped embedding).
We give precedence to the field's definition model, as silently
using a differing serialized one could hide some data integrity
problems.
Note that a single untyped EmbeddedModelField may process
instances of different models (especially when used as a type
of a collection field).
"""
module = column_values.pop('_module', None)
model = column_values.pop('_model', None)
if self.embedded_model is not None:
return self.embedded_model
elif module is not None:
return getattr(import_module(module), model)
else:
raise IntegrityError("Untyped EmbeddedModelField trying to load "
"data without serialized model class info.")
def to_python(self, value):
"""
Passes embedded model fields' values through embedded fields
        to_python methods and reinstantiates the embedded instance.
We expect to receive a field.attname => value dict together
with a model class from back-end database deconversion (which
needs to know fields of the model beforehand).
"""
# Either the model class has already been determined during
# deconverting values from the database or we've got a dict
# from a deserializer that may contain model class info.
if isinstance(value, tuple):
embedded_model, attribute_values = value
elif isinstance(value, dict):
embedded_model = self.stored_model(value)
attribute_values = value
else:
return value
# Pass values through respective fields' to_python, leaving
# fields for which no value is specified uninitialized.
attribute_values = dict(
(field.attname, field.to_python(attribute_values[field.attname]))
for field in embedded_model._meta.fields
if field.attname in attribute_values)
# Create the model instance.
instance = embedded_model(**attribute_values)
instance._state.adding = False
return instance
def get_db_prep_save(self, embedded_instance, connection):
"""
Applies pre_save and get_db_prep_save of embedded instance
fields and passes a field => value mapping down to database
type conversions.
The embedded instance will be saved as a column => value dict
in the end (possibly augmented with info about instance's model
for untyped embedding), but because we need to apply database
type conversions on embedded instance fields' values and for
these we need to know fields those values come from, we need to
entrust the database layer with creating the dict.
"""
if embedded_instance is None:
return None
# The field's value should be an instance of the model given in
# its declaration or at least of some model.
embedded_model = self.embedded_model or models.Model
if not isinstance(embedded_instance, embedded_model):
raise TypeError("Expected instance of type %r, not %r." %
(embedded_model, type(embedded_instance)))
# Apply pre_save and get_db_prep_save of embedded instance
# fields, create the field => value mapping to be passed to
# storage preprocessing.
field_values = {}
add = embedded_instance._state.adding
for field in embedded_instance._meta.fields:
value = field.get_db_prep_save(
field.pre_save(embedded_instance, add), connection=connection)
# Exclude unset primary keys (e.g. {'id': None}).
if field.primary_key and value is None:
continue
field_values[field] = value
# Let untyped fields store model info alongside values.
# We use fake RawFields for additional values to avoid passing
# embedded_instance to database conversions and to give
# back-ends a chance to apply generic conversions.
if self.embedded_model is None:
module_field = RawField()
module_field.set_attributes_from_name('_module')
model_field = RawField()
model_field.set_attributes_from_name('_model')
field_values.update(
((module_field, embedded_instance.__class__.__module__),
(model_field, embedded_instance.__class__.__name__)))
# This instance will exist in the database soon.
# TODO.XXX: Ensure that this doesn't cause race conditions.
embedded_instance._state.adding = False
return field_values
# TODO/XXX: Remove this once we have a cleaner solution.
def get_db_prep_lookup(self, lookup_type, value, connection,
prepared=False):
if hasattr(value, 'as_lookup_value'):
value = value.as_lookup_value(self, lookup_type, connection)
return value
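# Illustrative usage sketch (not part of the original module); model names are
# hypothetical:
#
#   class Address(models.Model):
#       street = models.CharField(max_length=100)
#
#   class Person(models.Model):
#       address = EmbeddedModelField(Address)   # typed embedding
#       extra = EmbeddedModelField()            # untyped: _module/_model stored too
#
# For untyped embedding, the serialized class info saved with the values is
# what stored_model() uses to re-instantiate the right model class on load.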
class BlobField(models.Field):
"""
A field for storing blobs of binary data.
The value might either be a string (or something that can be
converted to a string), or a file-like object.
In the latter case, the object has to provide a ``read`` method
from which the blob is read.
"""
def get_internal_type(self):
return 'BlobField'
def formfield(self, **kwargs):
"""
A file widget is provided, but use model FileField or
ImageField for storing specific files most of the time.
"""
from .widgets import BlobWidget
from django.forms import FileField
defaults = {'form_class': FileField, 'widget': BlobWidget}
defaults.update(kwargs)
return super(BlobField, self).formfield(**defaults)
def get_db_prep_save(self, value, connection):
if hasattr(value, 'read'):
return value.read()
else:
return str(value)
def get_db_prep_lookup(self, lookup_type, value, connection,
prepared=False):
raise TypeError("BlobFields do not support lookups.")
def value_to_string(self, obj):
return str(self._get_val_from_obj(obj))
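# Illustrative note (not part of the original module): a BlobField accepts
# either raw string data or an open file-like object, e.g. (hypothetical model)
#
#   class Attachment(models.Model):
#       payload = BlobField()
#
#   Attachment(payload=open('photo.jpg', 'rb')).save()   # read() is called on save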
class PointField(models.Field):
    def __init__(self, x_coord=0, y_coord=0, *args, **kwargs):
        self.x = x_coord
        self.y = y_coord
        super(PointField, self).__init__(*args, **kwargs)
    def value_to_string(self, obj):
        return "(%s, %s)" % (self.x, self.y)
| apache-2.0 | 6,344,079,010,264,808,000 | 37.735763 | 125 | 0.626874 | false |
gberthou/raiders | components.py | 1 | 2144 | # Generated file: do not edit, use gen_comps.py instead
import ecs
class MovementTarget(ecs.Component):
def __init__(self, target):
self.target = target
class AttackTarget(ecs.Component):
def __init__(self, target, dt):
self.target = target
self.dt = dt
class Path(ecs.Component):
def __init__(self, path, currentIndex):
self.path = path
self.currentIndex = currentIndex
class Weapon(ecs.Component):
def __init__(self, atk, atkSpeed, atkRange):
self.atk = atk
self.atkSpeed = atkSpeed
self.atkRange = atkRange
class Armor(ecs.Component):
def __init__(self, defense, slowFactor):
self.defense = defense
self.slowFactor = slowFactor
class Vulnerable(ecs.Component):
def __init__(self, hpmax, currenthp, visibility):
self.hpmax = hpmax
self.currenthp = currenthp
self.visibility = visibility
class Fighter(ecs.Component):
def __init__(self, name, team, movSpeed, fov):
self.name = name
self.team = team
self.movSpeed = movSpeed
self.fov = fov
class PhysicalObject(ecs.Component):
def __init__(self, size):
self.size = size
class Door(ecs.Component):
def __init__(self, tile0, tile1, isOpen):
self.tile0 = tile0
self.tile1 = tile1
self.isOpen = isOpen
class DrawableMap(ecs.Component):
def __init__(self, tilemap, chunkset):
self.tilemap = tilemap
self.chunkset = chunkset
class DrawableObject(ecs.Component):
def __init__(self, surface):
self.surface = surface
class DrawableFighter(ecs.Component):
def __init__(self, surface):
self.surface = surface
class DrawableTop(ecs.Component):
def __init__(self, surface):
self.surface = surface
class DrawableHUD(ecs.Component):
def __init__(self, surface):
self.surface = surface
class Position(ecs.Component):
def __init__(self, x, y):
self.x = x
self.y = y
class Selected(ecs.Component):
def __init__(self):
pass
class Leader(ecs.Component):
def __init__(self):
pass
| gpl-3.0 | -3,277,644,113,238,579,700 | 23.930233 | 55 | 0.621735 | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.1/Lib/codeop.py | 1 | 2821 | """Utility to compile possibly incomplete Python source code."""
__all__ = ["compile_command"]
def compile_command(source, filename="<input>", symbol="single"):
r"""Compile a command and determine whether it is incomplete.
Arguments:
source -- the source string; may contain \n characters
filename -- optional filename from which source was read; default "<input>"
symbol -- optional grammar start symbol; "single" (default) or "eval"
Return value / exceptions raised:
- Return a code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError or OverflowError if the command is a syntax error
(OverflowError if the error is in a numeric constant)
Approach:
First, check if the source consists entirely of blank lines and
comments; if so, replace it with 'pass', because the built-in
parser doesn't always do the right thing for these.
Compile three times: as is, with \n, and with \n\n appended. If
it compiles as is, it's complete. If it compiles with one \n
appended, we expect more. If it doesn't compile either way, we
compare the error we get when compiling with \n or \n\n appended.
If the errors are the same, the code is broken. But if the errors
are different, we expect more. Not intuitive; not even guaranteed
to hold in future releases; but this matches the compiler's
behavior from Python 1.4 through 1.5.2, at least.
Caveat:
It is possible (but not likely) that the parser stops parsing
with a successful outcome before reaching the end of the source;
in this case, trailing symbols may be ignored instead of causing an
error. For example, a backslash followed by two newlines may be
followed by arbitrary garbage. This will be fixed once the API
for the parser is better.
"""
# Check for source consisting of only blank lines and comments
for line in source.split("\n"):
line = line.strip()
if line and line[0] != '#':
break # Leave it alone
else:
source = "pass" # Replace it with a 'pass' statement
err = err1 = err2 = None
code = code1 = code2 = None
try:
code = compile(source, filename, symbol)
except SyntaxError, err:
pass
try:
code1 = compile(source + "\n", filename, symbol)
except SyntaxError, err1:
pass
try:
code2 = compile(source + "\n\n", filename, symbol)
except SyntaxError, err2:
pass
if code:
return code
try:
e1 = err1.__dict__
except AttributeError:
e1 = err1
try:
e2 = err2.__dict__
except AttributeError:
e2 = err2
if not code1 and e1 == e2:
raise SyntaxError, err1
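# Illustrative examples (not part of the original module) of the behaviour
# described in the docstring above (Python 1.5/2.x syntax assumed):
#
#   compile_command("print 1")    -> a code object (complete, valid command)
#   compile_command("if 1:")      -> None (valid so far, but incomplete)
#   compile_command("print 1)")   -> raises SyntaxError (broken either way)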
| mit | 1,996,009,731,289,652,700 | 32.583333 | 79 | 0.65296 | false |
smart-solution/icy | icy_partner_history/icy_partner_history.py | 2 | 13079 | # -*- coding: utf-8 -*-
###############################################
from osv import fields,osv
from tools.sql import drop_view_if_exists
class res_partner_history(osv.osv):
_name = "res_partner_history"
_description = "Partner History"
_auto = False
_columns = {
'date': fields.datetime('Datum', readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner', select=True, readonly=True),
'partner_name': fields.char('Partnernaam', select=True, readonly=True),
'id': fields.integer('ID'),
'description': fields.char('Omschrijving', readonly=True),
'reference': fields.text('Referentie', readonly=True),
'user_id': fields.many2one('res.users', 'Gebruiker', select=True, readonly=True),
'type': fields.char('Type', readonly=True),
'amount': fields.float('Bedrag', readonly=True),
'state': fields.char('Status', readonly=True),
'sale_order_id': fields.many2one('sale.order', 'Verkooporder', select=True, readonly=True),
'purchase_order_id': fields.many2one('purchase.order', 'Inkooporder', select=True, readonly=True),
'stock_picking_id': fields.many2one('stock.picking', 'Pakbon', select=True, readonly=True),
'account_invoice_id': fields.many2one('account.invoice', 'Factuur', select=True, readonly=True),
'crm_helpdesk_id': fields.many2one('crm.helpdesk', 'Service Call', select=True, readonly=True),
'payment_order_id': fields.many2one('payment.line', 'Betaling', select=True, readonly=True),
'crm_meeting_id': fields.many2one('crm.meeting', 'Afspraak', select=True, readonly=True),
'crm_phonecall_id': fields.many2one('crm.phonecall', 'Telefoon', select=True, readonly=True),
'crm_lead_id': fields.many2one('crm.lead', 'Verkoopkans', select=True, readonly=True),
'task_id': fields.many2one('project.task', 'Taak', select=True, readonly=True),
'message_id': fields.many2one('mail.message', 'Bericht', select=True, readonly=True),
'customer':fields.boolean('Klant'),
'supplier':fields.boolean('Leverancier'),
'consumer':fields.boolean('Consumer'),
}
_order = 'date desc'
def init(self, cr):
drop_view_if_exists(cr, 'res_partner_history')
# cr.execute("""
#CREATE OR REPLACE FUNCTION strip_tags(text)
# RETURNS text AS
#$BODY$
# SELECT regexp_replace(regexp_replace($1, E'(?x)<[^>]*?(\s alt \s* = \s* ([\'"]) ([^>]*?) \2) [^>]*? >', E'\3'), E'(?x)(< [^>]*? >)', '', 'g')
#$BODY$
# LANGUAGE sql VOLATILE
# COST 100;
#ALTER FUNCTION strip_tags(text)
# OWNER TO postgres;
#""")
cr.execute("""create or replace view res_partner_history
as
select (1000000 + sale_order.id) as id,
partner_id,
res_partner.name as partner_name,
sale_order.name as description,
client_order_ref as reference,
sale_order.create_uid as user_id,
date_order::date as date,
'Verkooporder' as type,
amount_total as amount,
case when state = 'done' then 'Uitgevoerd'
when state = 'sent' then 'Verzonden'
when state = 'manual' then 'Handmatig'
when state = 'invoice_except' then 'Probleem'
when state = 'draft' then 'Verkoopofferte'
when state = 'progress' then 'In Uitvoering'
when state = 'cancel' then 'Geannuleerd'
else 'Ongekend' end as state,
sale_order.id as sale_order_id,
NULL::integer as purchase_order_id,
NULL::integer as stock_picking_id,
NULL::integer as account_invoice_id,
NULL::integer as crm_helpdesk_id,
NULL::integer as payment_order_id,
NULL::integer as crm_meeting_id,
NULL::integer as crm_phonecall_id,
NULL::integer as crm_lead_id,
NULL::integer as task_id,
NULL::integer as message_id,
customer,
supplier,
consumer
from sale_order
join res_partner on (res_partner.id = partner_id)
union
select (2000000 + purchase_order.id) as id,
partner_id,
res_partner.name as partner_name,
purchase_order.name as description,
partner_ref as reference,
purchase_order.create_uid as user_id,
date_order::date as date,
'Inkooporder' as type,
amount_total as amount,
case when state = 'approved' then 'Inkooporder'
when state = 'draft' then 'Inkoopofferte'
when state = 'cancel' then 'Geannuleerd'
else 'Ongekend' end as state,
NULL::integer as sale_order_id,
purchase_order.id as purchase_order_id,
NULL::integer as stock_picking_id,
NULL::integer as account_invoice_id,
NULL::integer as crm_helpdesk_id,
NULL::integer as payment_order_id,
NULL::integer as crm_meeting_id,
NULL::integer as crm_phonecall_id,
NULL::integer as crm_lead_id,
NULL::integer as task_id,
NULL::integer as message_id,
customer,
supplier,
consumer
from purchase_order
join res_partner on (res_partner.id = partner_id)
union
select (3000000 + stock_picking.id) as id,
partner_id,
res_partner.name as partner_name,
stock_picking.name as description,
origin as reference,
stock_picking.create_uid as user_id,
stock_picking.date::date as date,
case when stock_picking.type = 'in' then 'Ontvangst' else 'Levering' end as type,
NULL::float as amount,
case when state = 'done' then 'Uitgevoerd'
when state = 'assigned' then 'In Voorbereiding'
when state = 'cancel' then 'Geannuleerd'
else 'Ongekend' end as state,
sale_id as sale_order_id,
purchase_id as purchase_order_id,
stock_picking.id as stock_picking_id,
NULL::integer as account_invoice_id,
NULL::integer as crm_helpdesk_id,
NULL::integer as payment_order_id,
NULL::integer as crm_meeting_id,
NULL::integer as crm_phonecall_id,
NULL::integer as crm_lead_id,
NULL::integer as task_id,
NULL::integer as message_id,
customer,
supplier,
consumer
from stock_picking
join res_partner on (res_partner.id = partner_id)
union
select (4000000 + account_invoice.id) as id,
partner_id,
res_partner.name as partner_name,
number as description,
origin as reference,
account_invoice.create_uid as user_id,
date_invoice::date as date,
case when account_invoice.type = 'in_invoice' then 'Leverancierfactuur'
when account_invoice.type = 'in_refund' then 'Leverancierkredietnota'
when account_invoice.type = 'out_invoice' then 'Klantfactuur'
when account_invoice.type = 'out_refund' then 'Klantkredietnota'
else 'Ongekend' end as type,
amount_total as amount,
case when state = 'open' then 'Open'
when state = 'paid' then 'Betaald'
when state = 'draft' then 'In Voorbereiding'
when state = 'proforma2' then 'Pro Forma'
when state = 'cancel' then 'Geannuleerd'
else 'Ongekend' end as state,
NULL::integer as sale_order_id,
NULL::integer as purchase_order_id,
NULL::integer as stock_picking_id,
account_invoice.id as account_invoice_id,
NULL::integer as crm_helpdesk_id,
NULL::integer as payment_order_id,
NULL::integer as crm_meeting_id,
NULL::integer as crm_phonecall_id,
NULL::integer as crm_lead_id,
NULL::integer as task_id,
NULL::integer as message_id,
customer,
supplier,
consumer
from account_invoice
join res_partner on (res_partner.id = partner_id)
union
select (5000000 + crm_helpdesk.id) as id,
partner_id,
res_partner.name as partner_name,
cast(crm_helpdesk.id as text) as description,
crm_helpdesk.name as reference,
crm_helpdesk.create_uid as user_id,
crm_helpdesk.date::date as date,
'Servicecall' as type,
NULL::float as amount,
case when state = 'done' then 'Afgesloten'
when state = 'open' then 'Open'
when state = 'draft' then 'In Voorbereiding'
when state = 'cancel' then 'Geannuleerd'
else 'Ongekend' end as state,
NULL::integer as sale_order_id,
NULL::integer as purchase_order_id,
NULL::integer as stock_picking_id,
NULL::integer as account_invoice_id,
crm_helpdesk.id as crm_helpdesk_id,
NULL::integer as payment_order_id,
NULL::integer as crm_meeting_id,
NULL::integer as crm_phonecall_id,
NULL::integer as crm_lead_id,
NULL::integer as task_id,
NULL::integer as message_id,
customer,
supplier,
consumer
from crm_helpdesk
join res_partner on (res_partner.id = partner_id)
union
select (6000000 + payment_line.id) as id,
partner_id,
res_partner.name as partner_name,
payment_order.reference as description,
communication as reference,
payment_order.user_id,
payment_line.date::date as date,
'Betaling' as type,
amount_currency as amount,
case when payment_order.state = 'done' then 'Afgesloten'
when payment_order.state = 'open' then 'Open'
when payment_order.state = 'draft' then 'In Voorbereiding'
when payment_order.state = 'cancel' then 'Geannuleerd'
else 'Ongekend' end as state,
NULL::integer as sale_order_id,
NULL::integer as purchase_order_id,
NULL::integer as stock_picking_id,
NULL::integer as account_invoice_id,
NULL::integer as crm_helpdesk_id,
payment_line.id as payment_order_id,
NULL::integer as crm_meeting_id,
NULL::integer as crm_phonecall_id,
NULL::integer as crm_lead_id,
NULL::integer as task_id,
NULL::integer as message_id,
customer,
supplier,
consumer
from payment_line
join payment_order on (order_id = payment_order.id)
join res_partner on (res_partner.id = partner_id)
union
select (7000000 + crm_meeting.id) as id,
partner_id,
res_partner.name as partner_name,
crm_meeting.name as description,
location as reference,
crm_meeting.user_id,
crm_meeting.date::date as date,
'Afspraak' as type,
NULL::float as amount,
case when state = 'done' then 'Afgesloten'
when state = 'open' then 'Open'
when state = 'draft' then 'In Voorbereiding'
when state = 'cancel' then 'Geannuleerd'
else 'Ongekend' end as state,
NULL::integer as sale_order_id,
NULL::integer as purchase_order_id,
NULL::integer as stock_picking_id,
NULL::integer as account_invoice_id,
NULL::integer as crm_helpdesk_id,
NULL::integer as payment_order_id,
crm_meeting.id as crm_meeting_id,
NULL::integer as crm_phonecall_id,
NULL::integer as crm_lead_id,
NULL::integer as task_id,
NULL::integer as message_id,
customer,
supplier,
consumer
from crm_meeting
join crm_meeting_partner_rel on (meeting_id = crm_meeting.id)
join res_partner on (res_partner.id = partner_id)
union
select (8000000 + crm_phonecall.id) as id,
partner_id,
res_partner.name as partner_name,
crm_phonecall.name as description,
description as reference,
crm_phonecall.user_id,
crm_phonecall.date::date as date,
'Telefoon' as type,
NULL::float as amount,
case when state = 'done' then 'Afgesloten'
when state = 'open' then 'Open'
when state = 'draft' then 'In Voorbereiding'
when state = 'cancel' then 'Geannuleerd'
else 'Ongekend' end as state,
NULL::integer as sale_order_id,
NULL::integer as purchase_order_id,
NULL::integer as stock_picking_id,
NULL::integer as account_invoice_id,
NULL::integer as crm_helpdesk_id,
NULL::integer as payment_order_id,
NULL::integer as crm_meeting_id,
crm_phonecall.id as crm_phonecall_id,
NULL::integer as crm_lead_id,
NULL::integer as task_id,
NULL::integer as message_id,
customer,
supplier,
consumer
from crm_phonecall
join res_partner on (res_partner.id = partner_id)
union
select (9000000 + crm_lead.id) as id,
partner_id,
res_partner.name as partner_name,
crm_lead.name as description,
description as reference,
crm_lead.user_id,
date_open::date as date,
'Verkoopkans' as type,
NULL::float as amount,
case when state = 'done' then 'Afgesloten'
when state = 'open' then 'Open'
when state = 'draft' then 'In Voorbereiding'
when state = 'cancel' then 'Geannuleerd'
else 'Ongekend' end as state,
NULL::integer as sale_order_id,
NULL::integer as purchase_order_id,
NULL::integer as stock_picking_id,
NULL::integer as account_invoice_id,
NULL::integer as crm_helpdesk_id,
NULL::integer as payment_order_id,
NULL::integer as crm_meeting_id,
NULL::integer as crm_phonecall_id,
crm_lead.id as crm_lead_id,
NULL::integer as task_id,
NULL::integer as message_id,
customer,
supplier,
consumer
from crm_lead
join res_partner on (res_partner.id = partner_id)
union
select (11000000 + mail_message.id) as id,
res_id as partner_id,
res_partner.name as partner_name,
record_name as description,
strip_tags(body) as reference,
mail_message.create_uid as user_id,
mail_message.date::date as date,
'Bericht' as type,
NULL::float as amount,
case when mail_message.type = 'notification' then 'Bericht'
when mail_message.type = 'comment' then 'Commentaar'
else 'Ongekend' end as state,
NULL::integer as sale_order_id,
NULL::integer as purchase_order_id,
NULL::integer as stock_picking_id,
NULL::integer as account_invoice_id,
NULL::integer as crm_helpdesk_id,
NULL::integer as payment_order_id,
NULL::integer as crm_meeting_id,
NULL::integer as crm_phonecall_id,
NULL::integer as crm_lead_id,
NULL::integer as task_id,
mail_message.id as message_id,
customer,
supplier,
consumer
from mail_message
join res_partner on (res_partner.id = res_id)
where model = 'res.partner'
order by partner_id, date desc, id desc;""")
res_partner_history()
class res_partner(osv.osv):
_inherit = 'res.partner'
_columns = {
'partner_history_ids': fields.one2many('res.partner.history', 'partner_id', 'Geschiedenis'),
}
res_partner()
| lgpl-3.0 | -1,187,325,361,287,801,300 | 32.88342 | 146 | 0.706552 | false |
simodalla/mezzanine_nowait | nowait/tests/test_forms.py | 1 | 1539 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.test import TestCase
from django.utils.timezone import now, timedelta
from .factories import BookingType30F, UserF
from ..models import Booking, SlotTime
from ..forms import BookingCreateForm
class BookingCreateFormTest(TestCase):
def setUp(self):
start = now()
self.booking_type = BookingType30F()
self.slottime = SlotTime.objects.create(
booking_type=self.booking_type,
start=start,
end=start + timedelta(minutes=self.booking_type.slot_length))
self.booker = UserF()
self.client.login(username=self.booker.username,
password=self.booker.username)
self.data = {'slottime': self.slottime.pk,
'notes': 'notes on booking',
'telephone': '+399900990'}
def test_form_is_not_valid(self):
"""
        Test that the clean method raises ValidationError if the slottime pk
        passed in the form is already linked to a Booking object.
"""
Booking.objects.create(booker=self.booker, slottime=self.slottime)
form = BookingCreateForm(self.data)
self.assertFalse(form.is_valid())
self.assertIn('__all__', form.errors)
self.assertEqual(form.errors['__all__'][0],
'Slot time selected is already assigned')
def test_form_is_valid(self):
form = BookingCreateForm(self.data)
self.assertTrue(form.is_valid())
| bsd-3-clause | -8,847,872,106,724,114,000 | 35.642857 | 77 | 0.62833 | false |
ondrokrc/gramps | gramps/gui/filters/sidebar/_citationsidebarfilter.py | 1 | 9120 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# gtk
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from ...widgets import MonitoredMenu, DateEntry, BasicEntry
from gramps.gen.lib import Citation
from .. import build_filter_model
from . import SidebarFilter
from gramps.gen.filters import GenericFilterFactory, rules
from gramps.gen.filters.rules.citation import (RegExpIdOf, HasCitation, HasTag,
HasNoteRegexp, MatchesFilter,
HasSource, RegExpSourceIdOf,
HasSourceNoteRegexp)
from gramps.gen.utils.string import conf_strings
GenericCitationFilter = GenericFilterFactory('Citation')
#-------------------------------------------------------------------------
#
# CitationSidebarFilter class
#
#-------------------------------------------------------------------------
class CitationSidebarFilter(SidebarFilter):
def __init__(self, dbstate, uistate, clicked):
self.clicked_func = clicked
self.filter_src_id = BasicEntry()
self.filter_src_title = BasicEntry()
self.filter_src_author = BasicEntry()
self.filter_src_abbr = BasicEntry()
self.filter_src_pub = BasicEntry()
self.filter_src_note = BasicEntry()
self.filter_id = Gtk.Entry()
self.filter_page = Gtk.Entry()
self.filter_date = DateEntry(uistate, [])
self.filter_conf = Gtk.ComboBox()
model = Gtk.ListStore(str)
for conf_value in sorted(conf_strings.keys()):
model.append((_(conf_strings[conf_value]),))
self.filter_conf.set_model(model)
self.filter_conf.set_active(Citation.CONF_NORMAL)
self.filter_note = Gtk.Entry()
self.filter_regex = Gtk.CheckButton(label=_('Use regular expressions'))
self.tag = Gtk.ComboBox()
self.generic = Gtk.ComboBox()
SidebarFilter.__init__(self, dbstate, uistate, "Citation")
def create_widget(self):
cell = Gtk.CellRendererText()
cell.set_property('width', self._FILTER_WIDTH)
cell.set_property('ellipsize', self._FILTER_ELLIPSIZE)
self.generic.pack_start(cell, True)
self.generic.add_attribute(cell, 'text', 0)
self.on_filters_changed('Citation')
cell = Gtk.CellRendererText()
cell.set_property('width', self._FILTER_WIDTH)
cell.set_property('ellipsize', self._FILTER_ELLIPSIZE)
self.filter_conf.pack_start(cell, True)
self.filter_conf.add_attribute(cell, 'text', 0)
cell = Gtk.CellRendererText()
cell.set_property('width', self._FILTER_WIDTH)
cell.set_property('ellipsize', self._FILTER_ELLIPSIZE)
self.tag.pack_start(cell, True)
self.tag.add_attribute(cell, 'text', 0)
self.add_heading(_('Source:'))
self.add_text_entry(_('ID'), self.filter_src_id)
self.add_text_entry(_('Title'), self.filter_src_title)
self.add_text_entry(_('Author'), self.filter_src_author)
self.add_text_entry(_('Abbreviation'), self.filter_src_abbr)
self.add_text_entry(_('Publication'), self.filter_src_pub)
self.add_text_entry(_('Note'), self.filter_src_note)
self.add_heading(_('Citation:'))
self.add_text_entry(_('ID'), self.filter_id)
self.add_text_entry(_('Volume/Page'), self.filter_page)
self.add_text_entry(_('Date'), self.filter_date)
self.add_entry(_('Citation: Minimum Confidence|Min. Conf.'), self.filter_conf)
self.add_text_entry(_('Note'), self.filter_note)
self.add_entry(_('Tag'), self.tag)
self.add_filter_entry(_('Custom filter'), self.generic)
self.add_entry(None, self.filter_regex)
def clear(self, obj):
self.filter_src_id.set_text('')
self.filter_src_title.set_text('')
self.filter_src_author.set_text('')
self.filter_src_abbr.set_text('')
self.filter_src_pub.set_text('')
self.filter_src_note.set_text('')
self.filter_id.set_text('')
self.filter_page.set_text('')
self.filter_date.set_text('')
self.filter_conf.set_active(Citation.CONF_NORMAL)
self.filter_note.set_text('')
self.tag.set_active(0)
self.generic.set_active(0)
def get_filter(self):
src_id = str(self.filter_src_id.get_text()).strip()
src_title = str(self.filter_src_title.get_text()).strip()
src_author = str(self.filter_src_author.get_text()).strip()
src_abbr = str(self.filter_src_abbr.get_text()).strip()
src_pub = str(self.filter_src_pub.get_text()).strip()
src_note = str(self.filter_src_note.get_text()).strip()
gid = str(self.filter_id.get_text()).strip()
page = str(self.filter_page.get_text()).strip()
date = str(self.filter_date.get_text()).strip()
model = self.filter_conf.get_model()
node = self.filter_conf.get_active_iter()
conf_name = model.get_value(node, 0) # The value is actually the text
conf = Citation.CONF_NORMAL
for i in list(conf_strings.keys()):
if _(conf_strings[i]) == conf_name:
conf = i
break
# conf = self.citn.get_confidence_level()
note = str(self.filter_note.get_text()).strip()
regex = self.filter_regex.get_active()
tag = self.tag.get_active() > 0
gen = self.generic.get_active() > 0
empty = not (src_id or src_title or src_author or src_abbr or src_pub or
src_note or
gid or page or date or conf or note or regex or gen)
if empty:
generic_filter = None
else:
generic_filter = GenericCitationFilter()
if gid:
rule = RegExpIdOf([gid], use_regex=regex)
generic_filter.add_rule(rule)
rule = HasCitation([page, date, conf], use_regex=regex)
generic_filter.add_rule(rule)
if src_id:
rule = RegExpSourceIdOf([src_id], use_regex=regex)
generic_filter.add_rule(rule)
rule = HasSource([src_title, src_author, src_abbr, src_pub],
use_regex=regex)
generic_filter.add_rule(rule)
if note:
rule = HasNoteRegexp([note], use_regex=regex)
generic_filter.add_rule(rule)
if src_note:
rule = HasSourceNoteRegexp([src_note], use_regex=regex)
generic_filter.add_rule(rule)
# check the Tag
if tag:
model = self.tag.get_model()
node = self.tag.get_active_iter()
attr = model.get_value(node, 0)
rule = HasTag([attr])
generic_filter.add_rule(rule)
if self.generic.get_active() != 0:
model = self.generic.get_model()
node = self.generic.get_active_iter()
obj = str(model.get_value(node, 0))
rule = MatchesFilter([obj])
generic_filter.add_rule(rule)
return generic_filter
def on_filters_changed(self, name_space):
if name_space == 'Citation':
all_filter = GenericCitationFilter()
all_filter.set_name(_("None"))
all_filter.add_rule(rules.citation.AllCitations([]))
self.generic.set_model(build_filter_model('Citation', [all_filter]))
self.generic.set_active(0)
def on_tags_changed(self, tag_list):
"""
Update the list of tags in the tag filter.
"""
model = Gtk.ListStore(str)
model.append(('',))
for tag_name in tag_list:
model.append((tag_name,))
self.tag.set_model(model)
self.tag.set_active(0)
| gpl-2.0 | 7,421,645,341,716,028,000 | 39.896861 | 86 | 0.566996 | false |
baifendian/harpc | python/bfd/harpc/zkclient.py | 1 | 1884 | # -*- coding: utf-8 -*-
# Copyright (C) 2015 Baifendian Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from kazoo.client import KazooClient
from kazoo.retry import KazooRetry
from bfd.harpc.common import utils
from bfd.harpc import settings
class HARpcZKClientManager(KazooClient):
__client_dict = {}
__lock = threading.RLock()
@classmethod
def make(cls, hosts, config, tag):
with cls.__lock:
key = "%s_%s" % (hosts, tag)
            if key in cls.__client_dict:
return cls.__client_dict.get(key)
else:
client = cls(hosts, config)
client.start()
cls.__client_dict[key] = client
return client
def __init__(self, hosts, config):
self._section_name = utils.get_module(__name__)
self._max_delay = config.getint(self._section_name, "max_retry_delay",
default=settings.DEFAULT_ZK_RETRY_MAX_DELAY)
self._timeout = config.getint(self._section_name, "time_out", default=settings.DEFAULT_ZK_CONNECTION_TIMEOUT)
connection_retry = KazooRetry(max_tries=-1, max_delay=self._max_delay)
super(HARpcZKClientManager, self).__init__(hosts=hosts, timeout=self._timeout,
connection_retry=connection_retry)
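# Usage sketch (illustrative, not part of the original module): make() caches
# one client per (hosts, tag) pair, so repeated calls with the same arguments
# reuse the already-started KazooClient instead of opening a new connection.
# The host string and the config object below are assumptions for the example;
# config must provide the getint() interface used in __init__.
def _example_shared_client(config):
    client_a = HARpcZKClientManager.make("zk1:2181,zk2:2181", config, "demo")
    client_b = HARpcZKClientManager.make("zk1:2181,zk2:2181", config, "demo")
    assert client_a is client_b  # same cached, already-started client
    return client_a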
| apache-2.0 | 7,831,133,068,014,573,000 | 36.68 | 117 | 0.640658 | false |
hammerlab/isovar | test/test_cli.py | 1 | 1851 | import tempfile
from os import remove
from os.path import getsize, exists
from testing_helpers import data_path
from isovar.cli.isovar_translations import run as isovar_translations
from isovar.cli.isovar_allele_counts import run as isovar_allele_counts
from isovar.cli.isovar_allele_reads import run as isovar_allele_reads
from isovar.cli.isovar_protein_sequences import run as isovar_protein_sequences
from isovar.cli.isovar_reference_contexts import run as isovar_reference_contexts
from isovar.cli.isovar_variant_reads import run as isovar_variant_reads
from isovar.cli.isovar_variant_sequences import run as isovar_variant_sequences
from isovar.cli.isovar_main import run as isovar_main
vcf_args = [
"--vcf",
data_path("data/b16.f10/b16.vcf")
]
args_with_bam = vcf_args + [
"--bam",
data_path("data/b16.f10/b16.combined.sorted.bam")
]
def run_cli_fn(fn, include_bam_in_args=True):
with tempfile.NamedTemporaryFile(delete=False) as f:
output_path = f.name
    assert exists(output_path)
output_args = ["--output", output_path]
if include_bam_in_args:
args = args_with_bam + output_args
else:
args = vcf_args + output_args
fn(args)
assert getsize(output_path) > 0
remove(output_path)
def test_cli_allele_counts():
run_cli_fn(isovar_allele_counts)
def test_cli_allele_reads():
run_cli_fn(isovar_allele_reads)
def test_cli_reference_contexts():
run_cli_fn(isovar_reference_contexts, include_bam_in_args=False)
def test_cli_protein_sequences():
run_cli_fn(isovar_protein_sequences)
def test_cli_translations():
run_cli_fn(isovar_translations)
def test_cli_variant_reads():
run_cli_fn(isovar_variant_reads)
def test_cli_variant_sequences():
run_cli_fn(isovar_variant_sequences)
def test_cli_main():
run_cli_fn(isovar_main) | apache-2.0 | 5,293,166,625,688,406,000 | 25.84058 | 81 | 0.726634 | false |
Aloomaio/googleads-python-lib | examples/ad_manager/v201811/publisher_query_language_service/get_all_browsers.py | 1 | 1899 | #!/usr/bin/env python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all browsers available to target from the Browser table.
Other tables include 'Bandwidth_Group', 'Browser_Language',
'Device_Capability', 'Operating_System', etc...
A full list of available criteria tables can be found at
https://developers.google.com/doubleclick-publishers/docs/reference/v201708/PublisherQueryLanguageService
"""
import tempfile
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize a report downloader.
report_downloader = client.GetDataDownloader(version='v201811')
with tempfile.NamedTemporaryFile(
prefix='browser_data_',
suffix='.csv', mode='w', delete=False) as browser_data_file:
browser_pql_query = ('SELECT Id, BrowserName, MajorVersion, MinorVersion '
'FROM Browser '
'ORDER BY BrowserName ASC')
# Downloads the response from PQL select statement to the specified file
report_downloader.DownloadPqlResultToCsv(
browser_pql_query, browser_data_file)
print 'Saved browser data to... %s' % browser_data_file.name
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| apache-2.0 | 7,414,707,478,289,868,000 | 33.527273 | 105 | 0.730384 | false |
nsmoooose/csp | csp/tools/layout2/scripts/ui/CommandControlFactory.py | 1 | 5436 | import wx
from controls.OutputPane import OutputPane
class CommandControlMediator(object):
"""The purpose of this class is to hold a binding between a command
and several UI controls. This class is responsible for enabling and
disabling UI controls depending on if commands are available or not.
    Call the appropriate ObserveXxxYyy functions in this class to make
it observe for changes in documents, controls etc.
"""
def __init__(self):
self.commands = {}
def ObserveDocumentRegistry(self, registry):
"""Attaches this object to necesarry events that the document
registry publishes. Whenever a document is changed, current doc
changed etc we will update necessarry controls."""
registry.GetCurrentDocumentChangedSignal().Connect(self.on_Update)
registry.GetDocumentAddedSignal().Connect(self.on_DocumentAdded)
def on_Update(self, subject):
self.UpdateControls()
def on_DocumentAdded(self, document):
document.GetChangedSignal().Connect(self.on_Update)
document.actionHistory.GetChangedSignal().Connect(self.on_Update)
def AddControl(self, command, control):
"""First argument is the command we wish to remember a binding
for. Second argument is a tuple of (parent control, control id, control)"""
if not self.commands.has_key(command):
self.commands[command] = []
self.commands[command].append(control)
def UpdateControls(self):
"""Iterates all command objects and tests to see if they are
        enabled or not. Example: if there are no documents open, the
SaveCurrentDocumentCommand should be disabled in toolbar and
menus."""
for command, controls in self.commands.iteritems():
enabled = command.Enabled()
for parent, id, control in controls:
if control.GetClassName() == "wxToolBarToolBase":
# Toolbar controls must be disabled and
# enabled using the toolbar control in order
# to get the grayed image when disabled.
if parent.GetToolEnabled(id) != enabled:
parent.EnableTool(id, enabled)
else:
if control.IsEnabled() != enabled:
control.Enable(enabled)
class CommandControlFactory(object):
"""A factory that creates ui controls that is bound to command objects."""
def __init__(self, control_mediator):
self.control_mediator = control_mediator
def CreateMenuItems(self, parent, commands, controlid={}):
"""Creates a menu with all the commands specified in the commands array.
A None value in the array means that we want to add a separator."""
menu = wx.Menu()
for command in commands:
# Add a separator if the command is equal to None
if command is None:
menu.AppendSeparator()
continue
controlId = wx.NewId() if not controlid.has_key(command) else controlid[command]
control = self.AppendInMenu(parent, menu, controlId, command)
self.control_mediator.AddControl(command, (menu, controlId, control))
return menu
def AppendInMenu(self, parent, menu, controlId, command):
menuItem = menu.Append(id = controlId, text = command.caption, help = command.tooltip, kind = wx.ITEM_NORMAL)
imageName = command.toolbarimage
if imageName:
bitmap = wx.ArtProvider.GetBitmap(imageName, client = wx.ART_MENU, size = (16, 16))
menuItem.SetBitmap(bitmap)
parent.Bind(wx.EVT_MENU, EventToCommandExecutionAdapter(command).Execute, menuItem)
return menuItem
def CreateToolBarButtons(self, parent, toolbar, commands, controlid={}):
"""Creates a toolbar with all the commands specified in the commands array.
A None value in the array means that we want to add a separator."""
for command in commands:
# Add a separator if the command is equal to None
if command is None:
toolbar.AddSeparator()
continue
controlId = wx.NewId() if not controlid.has_key(command) else controlid[command]
control = self.AppendInToolBar(parent, toolbar, controlId, command)
self.control_mediator.AddControl(command, (toolbar, controlId, control))
toolbar.Realize()
def AppendInToolBar(self, parent, toolbar, controlId, command):
imageName = command.toolbarimage
if not imageName:
imageName = 'generic'
bitmap = wx.ArtProvider.GetBitmap(imageName, client = wx.ART_TOOLBAR, size = toolbar.GetToolBitmapSize())
tool = toolbar.AddLabelTool( id = controlId, label = command.caption, bitmap = bitmap, bmpDisabled = wx.NullBitmap, kind = wx.ITEM_NORMAL, shortHelp = command.tooltip, longHelp = command.tooltip )
parent.Bind(wx.EVT_TOOL, EventToCommandExecutionAdapter(command).Execute, tool)
return tool
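# Usage sketch (illustrative, not part of the original file): the mediator
# observes the document registry while the factory builds the menu and toolbar
# entries, so UpdateControls() can grey out commands whose Enabled() is False.
# The frame, toolbar, registry and the NewDocumentCommand/OpenDocumentCommand
# classes are assumptions for the example; None entries become separators.
def _example_build_ui(frame, toolbar, document_registry):
    mediator = CommandControlMediator()
    mediator.ObserveDocumentRegistry(document_registry)
    factory = CommandControlFactory(mediator)
    commands = [NewDocumentCommand, OpenDocumentCommand, None,
                SaveCurrentDocumentCommand]
    menu = factory.CreateMenuItems(frame, commands)
    factory.CreateToolBarButtons(frame, toolbar, commands)
    mediator.UpdateControls()
    return menu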
class EventToCommandExecutionAdapter(object):
"""This class can be bound to a wx event (click on a menuitem, button, toolbar
button etc). When the event is fired the command sent to the constructor is
executed."""
def __init__(self, command):
"""Constructs this instance with a command object. This command will be
executed when the bound event is fired."""
if not isinstance(command, type):
raise Exception, "The EventToCommandExecutionAdapter takes the command type as parameter. Not an instance. %s" % command.__class__.__name__
self.command = command
def Execute(self, event):
"""Bind this method to a wx event. When the event is fired the command will
be executed."""
# If possible we will print the command execution to the output pane.
if OutputPane.Instance is not None:
OutputPane.Instance.AppendLine("Executing: %s" % self.command.__name__)
# Execute the command.
instance = self.command()
instance.Execute()
| gpl-2.0 | 4,432,801,303,168,567,300 | 39.567164 | 198 | 0.744665 | false |
nippoo/phy | phy/utils/_color.py | 1 | 2042 | # -*- coding: utf-8 -*-
"""Color routines."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import numpy as np
from random import uniform
from colorsys import hsv_to_rgb
#------------------------------------------------------------------------------
# Colors
#------------------------------------------------------------------------------
def _random_color():
"""Generate a random RGB color."""
h, s, v = uniform(0., 1.), uniform(.5, 1.), uniform(.5, 1.)
r, g, b = hsv_to_rgb(h, s, v)
return r, g, b
def _is_bright(rgb):
"""Return whether a RGB color is bright or not."""
r, g, b = rgb
gray = 0.299 * r + 0.587 * g + 0.114 * b
return gray >= .5
def _random_bright_color():
"""Generate a random bright color."""
rgb = _random_color()
while not _is_bright(rgb):
rgb = _random_color()
return rgb
#------------------------------------------------------------------------------
# Default colormap
#------------------------------------------------------------------------------
# Default color map for the selected clusters.
_COLORMAP = np.array([[8, 146, 252],
[255, 2, 2],
[240, 253, 2],
[228, 31, 228],
[2, 217, 2],
[255, 147, 2],
[212, 150, 70],
[205, 131, 201],
[201, 172, 36],
[150, 179, 62],
[95, 188, 122],
[129, 173, 190],
[231, 107, 119],
])
def _selected_clusters_colors(n_clusters=None):
if n_clusters is None:
n_clusters = _COLORMAP.shape[0]
if n_clusters > _COLORMAP.shape[0]:
colors = np.tile(_COLORMAP, (1 + n_clusters // _COLORMAP.shape[0], 1))
else:
colors = _COLORMAP
return colors[:n_clusters, ...] / 255.
| bsd-3-clause | 5,102,880,766,627,608,000 | 28.594203 | 79 | 0.358472 | false |
murvet/dictionary | dictionary/topics/migrations/0005_auto_20160823_1357.py | 1 | 1924 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-08-23 13:57
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('topics', '0004_auto_20160822_0853'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'verbose_name_plural': 'Kategoriler'},
),
migrations.AlterModelOptions(
name='entry',
options={'verbose_name_plural': 'Yorumlar'},
),
migrations.AlterModelOptions(
name='favoutire',
options={'verbose_name_plural': 'Favoriler'},
),
migrations.AlterField(
model_name='entry',
name='topic',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='topics.Topic'),
),
migrations.AlterField(
model_name='entry',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='favoutire',
name='entry',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='topics.Entry'),
),
migrations.AlterField(
model_name='favoutire',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='topic',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| mit | 139,691,936,656,903,900 | 34.62963 | 133 | 0.598233 | false |
lmtierney/watir-snake | nerodia/support/webserver.py | 1 | 3238 | import logging
import re
import socket
import threading
from os import path, chdir
try:
from urllib import request as urllib_request
except ImportError:
import urllib as urllib_request
try:
from http.server import HTTPServer, SimpleHTTPRequestHandler
from socketserver import ThreadingMixIn
except ImportError:
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
from SocketServer import ThreadingMixIn
HTML_ROOT = path.abspath(path.join(path.dirname(__file__), '..', '..', 'watir', 'spec',
'watirspec', 'html'))
if not path.isdir(HTML_ROOT):
msg = 'Cannot find HTML directory, make sure you have watir submoduled'
logging.error(msg)
assert 0, msg
DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = 8000
class RequestHandler(SimpleHTTPRequestHandler):
# Don't do any real posting of data, just page switching
def do_GET(self):
if self.path.endswith('/plain_text'):
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(b'This is text/plain')
elif re.search(r'/set_cookie', self.path):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.send_header('Set-Cookie', 'monster=1')
self.end_headers()
self.wfile.write(b"<html>C is for cookie, it's good enough for me</html>")
elif not re.search(r'.*\.\w+$', self.path):
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
else:
SimpleHTTPRequestHandler.do_GET(self)
def do_POST(self):
self.do_GET()
def log_message(self, format, *args):
""" Override to prevent stdout on requests """
pass
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
pass
class WebServer(object):
"""A very basic web server."""
def __init__(self, host=DEFAULT_HOST, port=DEFAULT_PORT):
self.stop_serving = False
self.host = host
self.port = port
while True:
try:
self.server = ThreadedHTTPServer((host, port), RequestHandler)
self.host = host
self.port = port
break
except socket.error:
logging.debug('port {} is in use, trying the next one'.format(port))
port += 1
self.thread = threading.Thread(target=self.run)
def run(self):
logging.debug('web server started')
while not self.stop_serving:
self.server.handle_request()
self.server.server_close()
def start(self):
chdir(HTML_ROOT)
self.thread.start()
def stop(self):
self.stop_serving = True
try:
# This is to force stop the server loop
urllib_request.urlopen('http://{}:{}'.format(self.host, self.port))
except IOError:
pass
logging.info('Shutting down the webserver')
self.thread.join()
def path_for(self, path):
return 'http://{}:{}/{}'.format(self.host, self.port, path)
| mit | -7,264,690,536,604,879,000 | 30.134615 | 87 | 0.601915 | false |
mlperf/training_results_v0.6 | NVIDIA/benchmarks/maskrcnn/implementations/pytorch/maskrcnn_benchmark/layers/roi_align.py | 1 | 2221 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2018-2019 NVIDIA CORPORATION. All rights reserved.
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from maskrcnn_benchmark import _C
from apex import amp
class _ROIAlign(Function):
@staticmethod
def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
ctx.save_for_backward(roi)
ctx.output_size = _pair(output_size)
ctx.spatial_scale = spatial_scale
ctx.sampling_ratio = sampling_ratio
ctx.input_shape = input.size()
output = _C.roi_align_forward(
input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio
)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
rois, = ctx.saved_tensors
output_size = ctx.output_size
spatial_scale = ctx.spatial_scale
sampling_ratio = ctx.sampling_ratio
bs, ch, h, w = ctx.input_shape
grad_input = _C.roi_align_backward(
grad_output,
rois,
spatial_scale,
output_size[0],
output_size[1],
bs,
ch,
h,
w,
sampling_ratio,
)
return grad_input, None, None, None, None
roi_align = _ROIAlign.apply
class ROIAlign(nn.Module):
def __init__(self, output_size, spatial_scale, sampling_ratio):
super(ROIAlign, self).__init__()
self.output_size = output_size
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
@amp.float_function
def forward(self, input, rois):
return roi_align(
input, rois, self.output_size, self.spatial_scale, self.sampling_ratio
)
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "output_size=" + str(self.output_size)
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
tmpstr += ")"
return tmpstr
| apache-2.0 | 3,996,665,180,457,763,000 | 30.728571 | 85 | 0.606934 | false |
diorcety/translate | translate/convert/test_ini2po.py | 1 | 5063 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from pytest import importorskip
from translate.convert import ini2po, test_convert
from translate.misc import wStringIO
importorskip("iniparse")
class TestIni2PO(object):
ConverterClass = ini2po.ini2po
def _convert(self, input_string, template_string=None, blank_msgstr=False,
duplicate_style="msgctxt", dialect="default",
success_expected=True):
"""Helper that converts to target format without using files."""
input_file = wStringIO.StringIO(input_string)
output_file = wStringIO.StringIO()
template_file = None
if template_string:
template_file = wStringIO.StringIO(template_string)
expected_result = 1 if success_expected else 0
converter = self.ConverterClass(input_file, output_file, template_file,
blank_msgstr, duplicate_style, dialect)
assert converter.run() == expected_result
return converter.target_store, output_file
def _convert_to_string(self, *args, **kwargs):
"""Helper that converts to target format string without using files."""
return self._convert(*args, **kwargs)[1].getvalue().decode('utf-8')
def test_convert_empty_file(self):
"""Check converting empty INI returns no output."""
assert self._convert_to_string('', success_expected=False) == ''
def test_convert_no_translation(self):
"""Check converting INI with no translatable text returns no output."""
output = self._convert_to_string('[section]', success_expected=False)
assert output == ''
def test_convert_simple(self):
"""Check the simplest case of converting a translation."""
input_string = """[section]
key=value
"""
expected_output = """#: [section]key
msgid "value"
msgstr ""
"""
output = self._convert_to_string(input_string)
assert expected_output in output
assert "extracted from " in output
def test_no_duplicates(self):
"""Check converting drops duplicates."""
input_string = """[section]
key=value
key=different
"""
expected_output = """#: [section]key
msgid "different"
msgstr ""
"""
output = self._convert_to_string(input_string,
duplicate_style="msgctxt")
assert expected_output in output
output = self._convert_to_string(input_string,
duplicate_style="merge")
assert expected_output in output
def test_merge_simple(self):
"""Check the simplest case of merging a translation."""
input_string = """[section]
key=valor
"""
template_string = """[section]
key=value
"""
expected_output = """#: [section]key
msgid "value"
msgstr "valor"
"""
output = self._convert_to_string(input_string, template_string)
assert expected_output in output
assert "extracted from " in output
def test_merge_misaligned_files(self):
"""Check merging two files that are not aligned."""
input_string = """[section]
other=missing
"""
template_string = """[section]
key=value
"""
expected_output = """#: [section]key
msgid "value"
msgstr ""
"""
assert expected_output in self._convert_to_string(input_string,
template_string)
def test_merge_blank_msgstr(self):
"""Check merging two files returns output without translations."""
input_string = """[section]
key=valor
"""
template_string = """[section]
key=value
"""
expected_output = """#: [section]key
msgid "value"
msgstr ""
"""
assert expected_output in self._convert_to_string(input_string,
template_string,
blank_msgstr=True)
def test_dialects_inno(self):
"""Check that we output correctly for Inno files."""
input_string = """[section]
prop = ṽḁḽṻḝ%tṽḁḽṻḝ2%n
"""
template_string = """[section]
prop = value%tvalue2%n
"""
expected_output = r"""#: [section]prop
msgid "value\tvalue2\n"
msgstr "ṽḁḽṻḝ\tṽḁḽṻḝ2\n"
"""
output = self._convert_to_string(input_string, template_string,
dialect="inno")
assert expected_output in output
class TestIni2POCommand(test_convert.TestConvertCommand, TestIni2PO):
"""Tests running actual ini2po commands on files"""
convertmodule = ini2po
defaultoptions = {"progress": "none"}
def test_help(self, capsys):
"""tests getting help"""
options = test_convert.TestConvertCommand.test_help(self, capsys)
options = self.help_check(options, "-t TEMPLATE, --template=TEMPLATE")
options = self.help_check(options, "-P, --pot")
options = self.help_check(options, "--duplicates=DUPLICATESTYLE")
| gpl-2.0 | -6,305,457,702,452,086,000 | 32.486667 | 79 | 0.605216 | false |
YangXuefeng/SWEL | combine_dsm_wn_word.py | 1 | 6546 | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 25 15:38:00 2014
@author: Yang Xuefeng
"""
from __future__ import division
import os
import numpy as np
import cPickle as cp
from collections import defaultdict as dd
def exist_number(w1,w2,s):
n = [1 for i in s if w1 in i and w2 in i]
return len(n)
#import gc
#from collections import defaultdict as dd
print 'loading'
s0 = cp.load(open(r'D:\ss\kb\word_set_doc.pkl'))
s1 = cp.load(open(r'D:\ss\kb\word_set_dep.pkl'))
s2 = cp.load(open(r'D:\ss\kb\word_set_senna.pkl'))
s3 = cp.load(open(r'D:\ss\kb\word_set_hlbl.pkl'))
s4 = cp.load(open(r'D:\ss\kb\word_set_skip.pkl'))
s5 = cp.load(open(r'D:\ss\kb\word_set_tomas.pkl'))
s6 = cp.load(open(r'D:\ss\kb\word_set_glove.pkl'))
#s7 = cp.load(open(r'D:\ss\kb\word_set_w2v.pkl'))
s_l = [s0,s1,s2,s3,s4,s5,s6]
#s = set.union(s0,s1,s2,s3,s4,s5,s6)
s = cp.load(open(r'D:\ss\wordlist_glove_50.pkl'))
s = set(s.keys())
words = np.load(r'D:\ss\wordnet_words.npy')
words = [i for i in words if i.isalpha()]
number = cp.load(open(r'D:\ss\kb\numbers.pkl'))
s = set.intersection(s,set(words))
#s = set(words)
data = {i:{} for i in words}
print 'starting'
pathname = r'D:\SS\kb\sense_kb'
names = os.listdir(pathname)
for i in names:
print i
name = pathname + '\\' + i
d = cp.load(open(name))
#count = 0
for j in d.keys():
if j not in s:
continue
#count = count + 1
#print count,j
#maximum = max([k[1] for k in d[j]])
leng = len(d[j])
for k in xrange(len(d[j])):
if d[j][k] in s:
if d[j][k] in data[j]:
data[j][d[j][k]][0] = data[j][d[j][k]][0]+(leng-k)/leng
data[j][d[j][k]][2] = data[j][d[j][k]][2]+1
else:
n = exist_number(j,d[j][k],s_l)
data[j][d[j][k]] = [(leng-k)/leng,n,1]
#print 'deleting'
#del d
#print 'garbage clearing'
#gc.collect()
print 'done, next'
print 'wordnet'
wn_value = open(r'D:\SS\KB\sense\value_a.txt')
wn_key = open(r'D:\SS\KB\sense\key_wn.txt')
wn_key = [i.strip('\n') for i in wn_key]
for i in xrange(len(wn_key)):
if wn_key[i] in s:
line = wn_value.readline()
if line != '':
line = line.strip(',\n')
line = line.split(',')
line = [k for k in line if k in s]
for j in line:
if j in data[wn_key[i]]:
data[wn_key[i]][j][0] = data[wn_key[i]][j][0] + 0.75
data[wn_key[i]][j][2] = data[wn_key[i]][j][2] + 1
else:
n = exist_number(j,wn_key[i],s_l)
data[wn_key[i]][j] = [0.75,n+1,1]
else:
line = wn_value.readline()
wn_value = open(r'D:\SS\KB\sense\value_n.txt')
for i in xrange(len(wn_key)):
if wn_key[i] in s:
line = wn_value.readline()
if line != '':
line = line.strip(',\n')
line = line.split(',')
line = [k for k in line if k in s]
for j in line:
if j in data[wn_key[i]]:
data[wn_key[i]][j][0] = data[wn_key[i]][j][0] + 1
data[wn_key[i]][j][2] = data[wn_key[i]][j][2] + 1
else:
n = exist_number(j,wn_key[i],s_l)
data[wn_key[i]][j] = [1,1+n,1]
else:
line = wn_value.readline()
wn_value = open(r'D:\SS\KB\sense\value_v.txt')
for i in xrange(len(wn_key)):
if wn_key[i] in s:
line = wn_value.readline()
if line != '':
line = line.strip(',\n')
line = line.split(',')
line = [k for k in line if k in s]
for j in line:
if j in data[wn_key[i]]:
data[wn_key[i]][j][0] = data[wn_key[i]][j][0] + 0.5
data[wn_key[i]][j][2] = data[wn_key[i]][j][2] + 1
else:
n = exist_number(j,wn_key[i],s_l)
data[wn_key[i]][j] = [0.5,n+1,1]
else:
line = wn_value.readline()
wn_value = open(r'D:\SS\KB\sense\value_r.txt')
for i in xrange(len(wn_key)):
if wn_key[i] in s:
line = wn_value.readline()
if line != '':
line = line.strip(',\n')
line = line.split(',')
line = [k for k in line if k in s]
for j in line:
if j in data[wn_key[i]]:
data[wn_key[i]][j][0] = data[wn_key[i]][j][0] + 0.75
data[wn_key[i]][j][2] = data[wn_key[i]][j][2] + 1
else:
n = exist_number(j,wn_key[i],s_l)
data[wn_key[i]][j] = [0.75,1+n,1]
else:
line = wn_value.readline()
wn_value = open(r'D:\SS\KB\sense\value_s.txt')
for i in xrange(len(wn_key)):
if wn_key[i] in s:
line = wn_value.readline()
if line != '':
line = line.strip(',\n')
line = line.split(',')
line = [k for k in line if k in s]
for j in line:
if j in data[wn_key[i]]:
data[wn_key[i]][j][0] = data[wn_key[i]][j][0] + 0.75
data[wn_key[i]][j][2] = data[wn_key[i]][j][2] + 1
else:
n = exist_number(j,wn_key[i],s_l)
data[wn_key[i]][j] = [0.75,1+n,1]
else:
line = wn_value.readline()
print 'calculate nummber'
#d = {i:{} for i in words}
for i in data.keys():
for j in data[i].keys():
if data[i][j][2]>1:
data[i][j] = data[i][j][0] / data[i][j][1]
else:
data[i][j] = 0
print 'processing numbers'
for i in data.keys():
if i not in number:
data[i] = {k:data[i][k] for k in data[i].keys() if k not in number and data[i][k]>=0.1}
print 'output'
fk = open(r'D:\ss\kb\sense\key_word.txt','w')
fv = open(r'D:\ss\kb\sense\value_word.txt','w')
for i in data.keys():
fk.write(i)
fk.write('\n')
items = data[i].items()
#items = [k for k in items if k[1]>1.01]
items.sort(key = lambda x:x[1])
items = items[::-1]
#if len(items) > 200:
# items = items[0:200]
for p in items:
fv.write(p[0]+':'+str(p[1])+',')
fv.write('\n')
print i, len(items)
fk.close()
fv.close()
#f_d = {i:data[i] for i in data.keys() if len(data[i])!=0}
#f_d = {i:f_d[i].items() for i in f_d.keys()}
#for i in f_d.keys():
# f_d[i].sort(key=lambda x:x[1])
#del data
#gc.collect()
#cp.dump(f_d,open(r'D:\ss\kb\sense\dsm.pkl','w'))
| mit | -1,585,718,721,414,777,300 | 31.899497 | 95 | 0.480752 | false |
nonZero/OpenCommunity | src/users/permissions.py | 1 | 2686 | from acl.default_roles import DefaultGroups, ALL_PERMISSIONS
"""
These functions work with anonymous users as well, and therefore are not a
part of the OCUser model.
"""
def load_community_permissions(user, community, committee=None):
from users.models import Membership
if user.is_authenticated():
try:
all_perms = set()
memberships = user.memberships.filter(community=community)
for m in memberships:
if committee:
perms = m.get_committee_group_permissions(committee)
else:
perms = m.get_permissions(community)
all_perms.update(perms)
return all_perms
except Membership.DoesNotExist:
pass
if community.is_public:
return DefaultGroups.permissions[DefaultGroups.MEMBER]
return []
def get_community_permissions(user, community, committee=None):
""" returns a cached list of permissions for a community and a user """
if not hasattr(user, '_community_permissions_cache'):
user._community_permissions_cache = {}
if community.id not in user._community_permissions_cache:
perms = load_community_permissions(user, community, committee)
user._community_permissions_cache[community.id] = perms
return user._community_permissions_cache[community.id]
def get_committee_permissions(user, committee):
""" returns a cached list of permissions for a community and a user """
if not hasattr(user, '_committee_permissions_cache'):
user._committee_permissions_cache = {}
if committee.id not in user._committee_permissions_cache:
perms = load_community_permissions(user, committee.community, committee)
user._committee_permissions_cache[committee.id] = perms
return user._committee_permissions_cache[committee.id]
###################################
def has_community_perm(user, community, perm):
if user.is_active and user.is_superuser:
return True
return perm in get_community_permissions(user, community)
def get_community_perms(user, community):
if user.is_active and user.is_superuser:
perms = ALL_PERMISSIONS
else:
perms = get_community_permissions(user, community)
return perms
def has_committee_perm(user, committee, perm):
if user.is_active and user.is_superuser:
return True
return perm in get_committee_permissions(user, committee)
def get_committee_perms(user, committee):
if user.is_active and user.is_superuser:
perms = ALL_PERMISSIONS
else:
perms = get_committee_permissions(user, committee)
return perms
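# Usage sketch (illustrative, not part of the original module): permissions are
# memoized on the user object, so repeated checks against the same community or
# committee within a request reuse the cached set. The permission name
# 'access_community' is an assumption for the example only.
def _example_check(user, community):
    if has_community_perm(user, community, 'access_community'):
        return get_community_perms(user, community)  # served from the cache
    return []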
| bsd-3-clause | -4,073,170,830,451,126,000 | 29.522727 | 80 | 0.669769 | false |
Motsai/neblina-python | neblinaUART.py | 1 | 2947 | #!/usr/bin/env python
###################################################################################
#
# Copyright (c) 2010-2016 Motsai
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
###################################################################################
import logging
import serial
import time
import os
from neblinaCommunication import NeblinaCommunication
from pyslip import slip
###################################################################################
class NeblinaUART(NeblinaCommunication):
def __init__(self, address):
NeblinaCommunication.__init__(self, address)
self.comslip = slip.slip()
self.sc = None
def connect(self):
# Try to open the serial COM port
logging.debug("Opening COM port : {0}".format(self.address))
self.sc = None
while self.sc is None:
try:
self.sc = serial.Serial(port=self.address, baudrate=500000)
except serial.serialutil.SerialException as se:
if 'Device or resource busy:' in se.__str__():
logging.info('Opening COM port is taking a little while, please stand by...')
else:
logging.error('se: {0}'.format(se))
time.sleep(1)
self.sc.flushInput()
def disconnect(self):
logging.debug("Closing COM port : {0}".format(self.address))
self.sc.close()
def isConnected(self):
if os.name == "posix":
return self.sc and self.sc.is_open
else:
return self.sc and self.sc.isOpen()
def receivePacket(self):
packet = None
try:
packet = self.comslip.receivePacketFromStream(self.sc)
except KeyboardInterrupt:
pass
return packet
def sendPacket(self, packet):
self.comslip.sendPacketToStream(self.sc, packet)
| mit | -3,687,489,549,640,990,000 | 34.939024 | 97 | 0.609433 | false |
jcatw/scnn | scnn/baseline_node_experiment.py | 1 | 3500 | __author__ = 'jatwood'
import sys
import numpy as np
from sklearn.metrics import f1_score, accuracy_score
from sklearn.linear_model import LogisticRegression
import data
import kernel
def baseline_node_experiment(model_fn, data_fn, data_name, model_name):
print 'Running node experiment (%s)...' % (data_name,)
A, X, Y = data_fn()
n_nodes = A.shape[0]
indices = np.arange(n_nodes)
np.random.shuffle(indices)
train_indices = indices[:n_nodes // 3]
valid_indices = indices[n_nodes // 3:(2* n_nodes) // 3]
test_indices = indices[(2* n_nodes) // 3:]
best_C = None
best_acc = float('-inf')
for C in [10**(-x) for x in range(-4,4)]:
m = model_fn(C)
m.fit(X[train_indices,:], np.argmax(Y[train_indices,:],1))
preds = m.predict(X[valid_indices])
actuals = np.argmax(Y[valid_indices,:],1)
accuracy = accuracy_score(actuals, preds)
if accuracy > best_acc:
best_C = C
best_acc = accuracy
m = model_fn(best_C)
m.fit(X[train_indices], np.argmax(Y[train_indices],1))
preds = m.predict(X[test_indices])
actuals = np.argmax(Y[test_indices,:],1)
accuracy = accuracy_score(actuals, preds)
f1_micro = f1_score(actuals, preds, average='micro')
f1_macro = f1_score(actuals, preds, average='macro')
print 'form: name,micro_f,macro_f,accuracy'
print '###RESULTS###: %s,%s,%.8f,%.8f,%.8f' % (data_name, model_name, f1_micro, f1_macro, accuracy)
def kernel_node_experiment(model, data_fn, data_name, model_name):
print 'Running node experiment (%s)...' % (data_name,)
A, X, Y = data_fn()
n_nodes = A.shape[0]
indices = np.arange(n_nodes)
np.random.shuffle(indices)
train_indices = indices[:n_nodes // 3]
valid_indices = indices[n_nodes // 3:(2* n_nodes) // 3]
test_indices = indices[(2* n_nodes) // 3:]
model.fit_with_validation(A,Y, train_indices, valid_indices, test_indices)
preds = model.predict(Y, valid_indices, test_indices)
actuals = Y[test_indices,:]
accuracy = accuracy_score(actuals, preds)
f1_micro = f1_score(actuals, preds, average='micro')
f1_macro = f1_score(actuals, preds, average='macro')
print 'form: name,micro_f,macro_f,accuracy'
print '###RESULTS###: %s,%s,%.8f,%.8f,%.8f' % (data_name, model_name, f1_micro, f1_macro, accuracy)
if __name__ == '__main__':
np.random.seed()
args = sys.argv[1:]
if len(args) == 0:
        # no CLI arguments given: run a default l2 logistic regression baseline on cora
        baseline_node_experiment(lambda C: LogisticRegression(penalty='l2', C=C),
                                 data.parse_cora, 'cora', 'logisticl2')
else:
name_to_data = {
'cora': data.parse_cora,
'pubmed': data.parse_pubmed,
'blogcatalog': data.parse_blogcatalog,
}
baseline_models = {
'logisticl1': lambda C: LogisticRegression(penalty='l1', C=C),
'logisticl2': lambda C: LogisticRegression(penalty='l2', C=C),
}
kernel_models = {
'ked': kernel.ExponentialDiffusionKernel(),
'kled': kernel.LaplacianExponentialDiffusionKernel(),
}
data_name = args[0]
data_fn = name_to_data[data_name]
model_name = args[1]
if model_name in baseline_models:
baseline_node_experiment(baseline_models[model_name], data_fn, data_name, model_name)
elif model_name in kernel_models:
kernel_node_experiment(kernel_models[model_name], data_fn, data_name, model_name)
else:
print '%s not recognized' % (model_name,)
| mit | 3,537,207,355,883,161,600 | 29.434783 | 103 | 0.602 | false |
GammaC0de/pyload | src/pyload/plugins/addons/DeathByCaptcha.py | 1 | 7164 | # -*- coding: utf-8 -*-
import base64
import json
import re
import time
import pycurl
from pyload.core.network.http.exceptions import BadHeader
from pyload.core.network.request_factory import get_request
from ..base.addon import BaseAddon, threaded
class DeathByCaptchaException(Exception):
DBC_ERRORS = {
"not-logged-in": "Access denied, check your credentials",
"invalid-credentials": "Access denied, check your credentials",
"banned": "Access denied, account is suspended",
"insufficient-funds": "Insufficient account balance to decrypt CAPTCHA",
"invalid-captcha": "CAPTCHA is not a valid image",
"service-overload": "CAPTCHA was rejected due to service overload, try again later",
"invalid-request": "Invalid request",
"timed-out": "No CAPTCHA solution received in time",
}
def __init__(self, err):
self.err = err
def get_code(self):
return self.err
def get_desc(self):
if self.err in self.DBC_ERRORS.keys():
return self.DBC_ERRORS[self.err]
else:
return self.err
def __str__(self):
return "<DeathByCaptchaException {}>".format(self.err)
def __repr__(self):
return "<DeathByCaptchaException {}>".format(self.err)
class DeathByCaptcha(BaseAddon):
__name__ = "DeathByCaptcha"
__type__ = "addon"
__version__ = "0.16"
__status__ = "testing"
__pyload_version__ = "0.5"
__config__ = [
("enabled", "bool", "Activated", False),
("username", "str", "Username", ""),
("password", "password", "Password", ""),
("check_client", "bool", "Don't use if client is connected", True),
]
__description__ = """Send captchas to DeathByCaptcha.com"""
__license__ = "GPLv3"
__authors__ = [("RaNaN", "[email protected]"), ("zoidberg", "[email protected]")]
API_URL = "http://api.dbcapi.me/api/"
def api_response(self, api="captcha", post=False, multipart=False):
with get_request() as req:
req.c.setopt(
pycurl.HTTPHEADER,
[
"Accept: application/json",
f"User-Agent: pyLoad {self.pyload.version}",
],
)
if post:
if not isinstance(post, dict):
post = {}
post.update(
{
"username": self.config.get("username"),
"password": self.config.get("password"),
}
)
res = None
try:
html = self.load(
"{}{}".format(self.API_URL, api),
post=post,
multipart=multipart,
req=req,
)
self.log_debug(html)
res = json.loads(html)
if "error" in res:
raise DeathByCaptchaException(res["error"])
elif "status" not in res:
raise DeathByCaptchaException(str(res))
except BadHeader as exc:
if exc.code == 403:
raise DeathByCaptchaException("not-logged-in")
elif exc.code == 413:
raise DeathByCaptchaException("invalid-captcha")
elif exc.code == 503:
raise DeathByCaptchaException("service-overload")
elif exc.code in (400, 405):
raise DeathByCaptchaException("invalid-request")
else:
raise
return res
def get_credits(self):
res = self.api_response("user", True)
if "is_banned" in res and res["is_banned"]:
raise DeathByCaptchaException("banned")
elif "balance" in res and "rate" in res:
self.info.update(res)
else:
raise DeathByCaptchaException(res)
def get_status(self):
res = self.api_response("status", False)
if "is_service_overloaded" in res and res["is_service_overloaded"]:
raise DeathByCaptchaException("service-overload")
def submit(self, captcha, captcha_type="file", match=None):
# NOTE: Workaround multipart-post bug in HTTPRequest.py
if re.match(r"^\w*$", self.config.get("password")):
multipart = True
data = (pycurl.FORM_FILE, captcha)
else:
multipart = False
with open(captcha, mode="rb") as fp:
data = fp.read()
data = "base64:" + base64.b64encode(data)
res = self.api_response("captcha", {"captchafile": data}, multipart)
if "captcha" not in res:
raise DeathByCaptchaException(res)
ticket = res["captcha"]
for _ in range(24):
time.sleep(5)
res = self.api_response("captcha/{}".format(ticket), False)
if res["text"] and res["is_correct"]:
break
else:
raise DeathByCaptchaException("timed-out")
result = res["text"]
self.log_debug(f"Result {ticket}: {result}")
return ticket, result
def captcha_task(self, task):
if "service" in task.data:
return False
if not task.is_textual():
return False
if not self.config.get("username") or not self.config.get("password"):
return False
if self.pyload.is_client_connected() and self.config.get("check_client"):
return False
try:
self.get_status()
self.get_credits()
except DeathByCaptchaException as exc:
self.log_error(exc)
return False
balance, rate = self.info["balance"], self.info["rate"]
self.log_info(
self._("Account balance"),
self._("US${:.3f} ({} captchas left at {:.2f} cents each)").format(
balance // 100, balance // rate, rate
),
)
if balance > rate:
task.handler.append(self)
task.data["service"] = self.classname
task.set_waiting(180)
self._process_captcha(task)
def captcha_invalid(self, task):
if task.data["service"] == self.classname and "ticket" in task.data:
try:
res = self.api_response(
"captcha/{}/report".format(task.data["ticket"]), True
)
except DeathByCaptchaException as exc:
self.log_error(exc)
except Exception as exc:
self.log_error(
exc,
exc_info=self.pyload.debug > 1,
stack_info=self.pyload.debug > 2,
)
@threaded
def _process_captcha(self, task):
c = task.captcha_params["file"]
try:
ticket, result = self.submit(c)
except DeathByCaptchaException as exc:
task.error = exc.get_code()
self.log_error(exc)
return
task.data["ticket"] = ticket
task.set_result(result)
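# Flow sketch (illustrative, not part of the original addon): submit() uploads
# the image and polls the DBC API every 5 seconds (at most 24 times) until a
# solution arrives; the returned ticket lets captcha_invalid() report a wrong
# answer back via captcha/<ticket>/report. The file name is an assumption.
def _example_solve(addon):
    ticket, text = addon.submit("captcha.png")  # blocks until solved or timed out
    return ticket, text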
| agpl-3.0 | -3,831,024,778,523,157,500 | 30.147826 | 92 | 0.526103 | false |
smutt/dpkt | dpkt/ip6.py | 1 | 11795 | # $Id: ip6.py 87 2013-03-05 19:41:04Z [email protected] $
# -*- coding: utf-8 -*-
"""Internet Protocol, version 6."""
from __future__ import print_function
from __future__ import absolute_import
from . import dpkt
from . import ip
from .decorators import deprecated
from .compat import compat_ord
class IP6(dpkt.Packet):
"""Internet Protocol, version 6.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of IPv6.
TODO.
"""
__hdr__ = (
('_v_fc_flow', 'I', 0x60000000),
('plen', 'H', 0), # payload length (not including header)
('nxt', 'B', 0), # next header protocol
('hlim', 'B', 0), # hop limit
('src', '16s', ''),
('dst', '16s', '')
)
_protosw = ip.IP._protosw
@property
def v(self):
return self._v_fc_flow >> 28
@v.setter
def v(self, v):
self._v_fc_flow = (self._v_fc_flow & ~0xf0000000) | (v << 28)
@property
def fc(self):
return (self._v_fc_flow >> 20) & 0xff
@fc.setter
def fc(self, v):
self._v_fc_flow = (self._v_fc_flow & ~0xff00000) | (v << 20)
@property
def flow(self):
return self._v_fc_flow & 0xfffff
@flow.setter
def flow(self, v):
self._v_fc_flow = (self._v_fc_flow & ~0xfffff) | (v & 0xfffff)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.extension_hdrs = {}
if self.plen:
buf = self.data[:self.plen]
else: # due to jumbo payload or TSO
buf = self.data
next_ext_hdr = self.nxt
while next_ext_hdr in ext_hdrs:
ext = ext_hdrs_cls[next_ext_hdr](buf)
self.extension_hdrs[next_ext_hdr] = ext
buf = buf[ext.length:]
next_ext_hdr = getattr(ext, 'nxt', None)
# set the payload protocol id
if next_ext_hdr is not None:
self.p = next_ext_hdr
try:
self.data = self._protosw[next_ext_hdr](buf)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, dpkt.UnpackError):
self.data = buf
def headers_str(self):
"""Output extension headers in order defined in RFC1883 (except dest opts)"""
header_str = b""
for hdr in ext_hdrs:
if hdr in self.extension_hdrs:
header_str += bytes(self.extension_hdrs[hdr])
return header_str
def __bytes__(self):
if (self.p == 6 or self.p == 17 or self.p == 58) and not self.data.sum:
# XXX - set TCP, UDP, and ICMPv6 checksums
p = bytes(self.data)
s = dpkt.struct.pack('>16s16sxBH', self.src, self.dst, self.nxt, len(p))
s = dpkt.in_cksum_add(0, s)
s = dpkt.in_cksum_add(s, p)
try:
self.data.sum = dpkt.in_cksum_done(s)
except AttributeError:
pass
return self.pack_hdr() + self.headers_str() + bytes(self.data)
@classmethod
def set_proto(cls, p, pktclass):
cls._protosw[p] = pktclass
@classmethod
def get_proto(cls, p):
return cls._protosw[p]
class IP6ExtensionHeader(dpkt.Packet):
"""
An extension header is very similar to a 'sub-packet'.
We just want to re-use all the hdr unpacking etc.
"""
pass
class IP6OptsHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0) # option data length in 8 octect units (ignoring first 8 octets) so, len 0 == 64bit header
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = (self.len + 1) * 8
options = []
index = 0
while index < self.length - 2:
opt_type = compat_ord(self.data[index])
# PAD1 option
if opt_type == 0:
index += 1
continue
opt_length = compat_ord(self.data[index + 1])
if opt_type == 1: # PADN option
# PADN uses opt_length bytes in total
index += opt_length + 2
continue
options.append(
{'type': opt_type, 'opt_length': opt_length, 'data': self.data[index + 2:index + 2 + opt_length]})
# add the two chars and the option_length, to move to the next option
index += opt_length + 2
self.options = options
class IP6HopOptsHeader(IP6OptsHeader):
pass
class IP6DstOptsHeader(IP6OptsHeader):
pass
class IP6RoutingHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0), # extension data length in 8 octect units (ignoring first 8 octets) (<= 46 for type 0)
('type', 'B', 0), # routing type (currently, only 0 is used)
('segs_left', 'B', 0), # remaining segments in route, until destination (<= 23)
('rsvd_sl_bits', 'I', 0), # reserved (1 byte), strict/loose bitmap for addresses
)
@property
def sl_bits(self):
return self.rsvd_sl_bits & 0xffffff
@sl_bits.setter
def sl_bits(self, v):
self.rsvd_sl_bits = (self.rsvd_sl_bits & ~0xfffff) | (v & 0xfffff)
def unpack(self, buf):
hdr_size = 8
addr_size = 16
dpkt.Packet.unpack(self, buf)
addresses = []
num_addresses = self.len // 2
buf = buf[hdr_size:hdr_size + num_addresses * addr_size]
for i in range(num_addresses):
addresses.append(buf[i * addr_size: i * addr_size + addr_size])
self.data = buf
self.addresses = addresses
self.length = self.len * 8 + 8
class IP6FragmentHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('resv', 'B', 0), # reserved, set to 0
('frag_off_resv_m', 'H', 0), # frag offset (13 bits), reserved zero (2 bits), More frags flag
('id', 'I', 0) # fragments id
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = self.__hdr_len__
self.data = b''
@property
def frag_off(self):
return self.frag_off_resv_m >> 3
@frag_off.setter
def frag_off(self, v):
self.frag_off_resv_m = (self.frag_off_resv_m & ~0xfff8) | (v << 3)
@property
def m_flag(self):
return self.frag_off_resv_m & 1
@m_flag.setter
def m_flag(self, v):
self.frag_off_resv_m = (self.frag_off_resv_m & ~0xfffe) | v
class IP6AHHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0), # length of header in 4 octet units (ignoring first 2 units)
('resv', 'H', 0), # reserved, 2 bytes of 0
('spi', 'I', 0), # SPI security parameter index
('seq', 'I', 0) # sequence no.
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = (self.len + 2) * 4
self.auth_data = self.data[:(self.len - 1) * 4]
class IP6ESPHeader(IP6ExtensionHeader):
__hdr__ = (
('spi', 'I', 0),
('seq', 'I', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = self.__hdr_len__ + len(self.data)
ext_hdrs = [ip.IP_PROTO_HOPOPTS, ip.IP_PROTO_ROUTING, ip.IP_PROTO_FRAGMENT, ip.IP_PROTO_AH, ip.IP_PROTO_ESP,
ip.IP_PROTO_DSTOPTS]
ext_hdrs_cls = {ip.IP_PROTO_HOPOPTS: IP6HopOptsHeader,
ip.IP_PROTO_ROUTING: IP6RoutingHeader,
ip.IP_PROTO_FRAGMENT: IP6FragmentHeader,
ip.IP_PROTO_ESP: IP6ESPHeader,
ip.IP_PROTO_AH: IP6AHHeader,
ip.IP_PROTO_DSTOPTS: IP6DstOptsHeader}
def test_ipg():
s = b'`\x00\x00\x00\x00(\x06@\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x11$\xff\xfe\x8c\x11\xde\xfe\x80\x00\x00\x00\x00\x00\x00\x02\xb0\xd0\xff\xfe\xe1\x80r\xcd\xca\x00\x16\x04\x84F\xd5\x00\x00\x00\x00\xa0\x02\xff\xff\xf8\t\x00\x00\x02\x04\x05\xa0\x01\x03\x03\x00\x01\x01\x08\n}\x185?\x00\x00\x00\x00'
_ip = IP6(s)
# print `ip`
_ip.data.sum = 0
s2 = bytes(_ip)
IP6(s)
# print `ip2`
assert (s == s2)
def test_ip6_routing_header():
s = b'`\x00\x00\x00\x00<+@ H\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca G\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02\x00\x00\x00\x00 \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14\x00P\x00\x00\x00\x00\x00\x00\x00\x00P\x02 \x00\x91\x7f\x00\x00'
ip = IP6(s)
s2 = bytes(ip)
# 43 is Routing header id
assert (len(ip.extension_hdrs[43].addresses) == 2)
assert ip.tcp
assert (s == s2)
assert bytes(ip) == s
def test_ip6_fragment_header():
s = b'\x06\xee\xff\xfb\x00\x00\xff\xff'
fh = IP6FragmentHeader(s)
    # s2 = str(fh)  # note: 's2' would be unused
bytes(fh)
assert (fh.nxt == 6)
assert (fh.id == 65535)
assert (fh.frag_off == 8191)
assert (fh.m_flag == 1)
assert bytes(fh) == s
# IP6 with fragment header
s = b'\x60\x00\x00\x00\x00\x10\x2c\x00\x02\x22\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x03\x33\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x29\x00\x00\x01\x00\x00\x00\x00\x60\x00\x00\x00\x00\x10\x2c\x00'
ip = IP6(s)
assert bytes(ip) == s
def test_ip6_options_header():
s = b';\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00'
options = IP6OptsHeader(s).options
assert (len(options) == 3)
assert bytes(IP6OptsHeader(s)) == s
def test_ip6_ah_header():
s = b';\x04\x00\x00\x02\x02\x02\x02\x01\x01\x01\x01\x78\x78\x78\x78\x78\x78\x78\x78'
ah = IP6AHHeader(s)
assert (ah.length == 24)
assert (ah.auth_data == b'xxxxxxxx')
assert (ah.spi == 0x2020202)
assert (ah.seq == 0x1010101)
assert bytes(ah) == s
def test_ip6_esp_header():
s = b'\x00\x00\x01\x00\x00\x00\x00\x44\xe2\x4f\x9e\x68\xf3\xcd\xb1\x5f\x61\x65\x42\x8b\x78\x0b\x4a\xfd\x13\xf0\x15\x98\xf5\x55\x16\xa8\x12\xb3\xb8\x4d\xbc\x16\xb2\x14\xbe\x3d\xf9\x96\xd4\xa0\x39\x1f\x85\x74\x25\x81\x83\xa6\x0d\x99\xb6\xba\xa3\xcc\xb6\xe0\x9a\x78\xee\xf2\xaf\x9a'
esp = IP6ESPHeader(s)
assert esp.length == 68
assert esp.spi == 256
assert bytes(esp) == s
def test_ip6_extension_headers():
p = b'`\x00\x00\x00\x00<+@ H\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca G\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02\x00\x00\x00\x00 \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14\x00P\x00\x00\x00\x00\x00\x00\x00\x00P\x02 \x00\x91\x7f\x00\x00'
ip = IP6(p)
o = b';\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00'
options = IP6HopOptsHeader(o)
ip.extension_hdrs[0] = options
fh = b'\x06\xee\xff\xfb\x00\x00\xff\xff'
ip.extension_hdrs[44] = IP6FragmentHeader(fh)
ah = b';\x04\x00\x00\x02\x02\x02\x02\x01\x01\x01\x01\x78\x78\x78\x78\x78\x78\x78\x78'
ip.extension_hdrs[51] = IP6AHHeader(ah)
do = b';\x02\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
ip.extension_hdrs[60] = IP6DstOptsHeader(do)
assert (len(ip.extension_hdrs) == 5)
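# --- Illustrative usage sketch (editor addition, not part of upstream dpkt) ---
# Shows the typical consumption pattern for the IP6 class above: parse a raw IPv6
# frame, read a few header fields, and (for TCP payloads) reach the nested TCP
# object that unpack() attaches as `self.tcp`. It can be exercised with the byte
# string used in test_ipg() above.
def example_ip6_fields(raw):
    pkt = IP6(raw)
    summary = {
        'next_header': pkt.nxt,       # next-header field of the fixed header
        'payload_len': pkt.plen,      # payload length from the fixed header
        'flow_label': pkt.flow,       # 20-bit flow label via the property above
        'extension_hdrs': sorted(pkt.extension_hdrs),
    }
    if hasattr(pkt, 'tcp'):           # set by unpack() when the payload is TCP
        summary['tcp_ports'] = (pkt.tcp.sport, pkt.tcp.dport)
    return summary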
if __name__ == '__main__':
test_ipg()
test_ip6_routing_header()
test_ip6_fragment_header()
test_ip6_options_header()
test_ip6_ah_header()
test_ip6_esp_header()
test_ip6_extension_headers()
print('Tests Successful...')
| bsd-3-clause | -627,008,085,343,735,400 | 31.946927 | 369 | 0.583891 | false |
bgribble/mfp | mfp/scale.py | 1 | 1168 |
class Scale (object):
def __init__(self):
pass
class Tuning (object):
def __init__(self):
pass
class EqualTemper (Tuning):
def __init__(self, a4=440.0):
Tuning.__init__(self)
self.base_frequencies = [261.63, 277.18, 293.66, 311.13, 329.63,
349.23, 369.99, 392.00, 415.30, 440.00,
466.16, 493.88]
self.a4_freq = a4
self.frequencies = []
self.retune(self.a4_freq)
def retune(self, a4):
scaling = a4 / 440.0
self.a4_freq = a4
self.frequencies = [f * scaling for f in self.base_frequencies]
def freq(self, octave, tone):
octave_scale = 2.0 ** (octave - 4)
return self.frequencies[tone] * octave_scale
class Chromatic (Scale):
def __init__(self, transpose=0):
Scale.__init__(self)
self.transpose_semis = transpose
def transpose(self, offset):
self.transpose_semis = offset
def midinote(self, keynum):
note = keynum + self.transpose_semis
octave = int(note) // 12 - 2
tone = int(note) % 12
return (octave, tone)
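# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# Combines the classes above: Chromatic.midinote() turns a key number into an
# (octave, tone) pair and EqualTemper.freq() turns that pair into a frequency in Hz.
# With this module's numbering, key 81 maps to octave 4, tone 9, i.e. 440.0 Hz under
# the default A4 tuning; the key number is only an example input.
def example_key_to_freq(keynum=81, a4=440.0):
    tuning = EqualTemper(a4=a4)
    scale = Chromatic()
    octave, tone = scale.midinote(keynum)
    return tuning.freq(octave, tone)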
| gpl-2.0 | 6,901,891,649,023,727,000 | 24.955556 | 72 | 0.536815 | false |
21strun/django-maintenancemode | maintenancemode/middleware.py | 1 | 2206 | #!/usr/bin/env python
# coding: utf-8
import re
import django
from django.conf import settings
from django.core import urlresolvers
from .models import MaintenanceMode
if django.VERSION[:2] <= (1, 3):
from django.conf.urls import defaults as urls
else:
from django.conf import urls
from maintenancemode.conf.settings.defaults import (
MAINTENANCE_MODE, MAINTENANCE_IGNORE_URLS)
urls.handler503 = 'maintenancemode.views.defaults.temporary_unavailable'
urls.__all__.append('handler503')
IGNORE_URLS = [re.compile(url) for url in MAINTENANCE_IGNORE_URLS]
class MaintenanceModeMiddleware(object):
def process_request(self, request):
# Allow access if middleware is not activated
enabled_maintenance_modes = \
MaintenanceMode.objects.filter(enable_maintenance_mode=True)
enabled_maintenance_mode = \
enabled_maintenance_modes[0] if enabled_maintenance_modes else None
if not MAINTENANCE_MODE and not enabled_maintenance_mode:
return None
# Allow access if remote ip is in INTERNAL_IPS
if request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS:
return None
# Allow access if the user doing the request is logged in and a
# superuser.
        superuser_from_model_enabled = (
            enabled_maintenance_mode.allow_superuser if enabled_maintenance_mode else False)
ALLOW_SU = getattr(settings, 'MAINTENANCE_ALLOW_SU', False)
if (
(ALLOW_SU or superuser_from_model_enabled) and
hasattr(request, 'user') and
request.user.is_superuser
):
return None
        # update list of ignored urls with the ignored urls from the model, if
        # maintenance mode was enabled through the model rather than the setting
        if enabled_maintenance_mode:
            for ignore_url in enabled_maintenance_mode.ignored_urls.all():
                IGNORE_URLS.append(re.compile(ignore_url.url))
# Check if a path is explicitly excluded from maintenance mode
for url in IGNORE_URLS:
if url.match(request.path_info):
return None
# Otherwise show the user the 503 page
resolver = urlresolvers.get_resolver(None)
callback, param_dict = resolver.resolve_error_handler('503')
return callback(request, **param_dict)
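# --- Illustrative configuration sketch (editor addition) ---
# The middleware above reads MAINTENANCE_MODE and MAINTENANCE_IGNORE_URLS (through
# maintenancemode.conf.settings.defaults), the optional MAINTENANCE_ALLOW_SU flag,
# and INTERNAL_IPS. A hypothetical settings.py fragment could look like this; the
# URL patterns below are placeholders, not part of this module:
#
#     MIDDLEWARE_CLASSES += ('maintenancemode.middleware.MaintenanceModeMiddleware',)
#     MAINTENANCE_MODE = True                    # switch the 503 response on
#     MAINTENANCE_ALLOW_SU = True                # let logged-in superusers through
#     MAINTENANCE_IGNORE_URLS = (r'^/admin/.*',)
#     INTERNAL_IPS = ('127.0.0.1',)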
| bsd-3-clause | 3,300,159,293,919,983,600 | 32.938462 | 79 | 0.670444 | false |
malon/presupuesto | budget_app/loaders/aragon_bulk_budget_loader.py | 1 | 24371 | # -*- coding: UTF-8 -*-
from budget_app.models import *
from collections import namedtuple
from decimal import *
import csv
import os
import re
class AragonBulkBudgetLoader:
BudgetId = namedtuple('BudgetId', 'entity_id year')
Uid = namedtuple('Uid', 'dimension is_expense is_actual chapter article concept subconcept')
Item = namedtuple('Item', 'description amount')
FunctionalId = namedtuple('FunctionalId', 'policy group function')
has_budget_data = {}
has_actual_data = {}
functional_areas = {}
def load(self, level, path):
# Parse the incoming data and keep in memory
budget_items = {}
economic_filename = os.path.join(path, 'clasificacion_economica.csv')
self.parse_budget_data(budget_items, level, economic_filename)
non_xbrl_filename = os.path.join(path, 'no_xbrl.csv')
self.parse_non_xbrl_data(budget_items, level, non_xbrl_filename)
functional_filename = os.path.join(path, 'clasificacion_funcional.csv')
self.parse_budget_data(budget_items, level, functional_filename)
self.load_functional_area_fix(path)
# Now load the data one budget at a time
for budget_id in budget_items:
self.load_budget(level, path, budget_id.entity_id, budget_id.year, budget_items[budget_id])
def parse_budget_data(self, budget_items, level, filename):
        # Group the incoming data (unsorted, and involving many different entities) by year/entity.
        # Note: Since the structure of the incoming data is still changing at this time, and the data
        # usually arrives in per-year, per-entity files, it feels better to load it this way, allowing
        # us to accommodate later data files that cover only subsets of the overall domain.
reader = csv.reader(open(filename, 'rb'), delimiter=';')
for index, line in enumerate(reader):
if re.match("^#", line[0]): # Ignore comments
continue
if re.match("^ +$", line[0]): # Ignore empty lines
continue
if not re.match("^ ", line[0]): # Ignore lines with SQL commands
continue
# Sigh. Deal with slightly different formats
# if level == 'municipio' and 'clasificacion_economica.csv' in filename: # Remove budget id
# line.pop(0)
if level == 'municipio': # Remove province id
line[4] = line[3].strip()+line[4].strip()
line.pop(3)
# Ignore data before 2011, the one we have is crappy
year = line[0].strip()
if (int(year) < 2011):
continue
# Ignore chapter 0 data, which is sometimes present as a sum of the other chapters.
# If it was always there, and was correct, we could use it to validate the data we're loading
# but it's not the case: 99% of the time missing, and when present often nonsense. If we
# left it in it'd create mayhem when present, because we'd double count it.
chapter = line[5].strip()
if (chapter == '0'):
continue
# Ignore zero amounts. We used to do this when inserting into the DB, but it's useful to
# know it at this point, so we can fall back into non-XBRL data if needed. There's a
# bit of duplication, calculating the amount twice, but, good enough.
amount = self._get_amount(line)
if amount == 0:
continue
# Finally, we have useful data
budget_id = AragonBulkBudgetLoader.BudgetId(line[3], year)
if not budget_id in budget_items:
budget_items[budget_id] = []
budget_items[budget_id].append(line)
# Keep track of what we have
if self._is_actual(line):
AragonBulkBudgetLoader.has_actual_data[budget_id] = True
else:
AragonBulkBudgetLoader.has_budget_data[budget_id] = True
def parse_non_xbrl_data(self, budget_items, level, filename):
reader = csv.reader(open(filename, 'rb'), delimiter=';')
for index, line in enumerate(reader):
if re.match("^[A-Z]+", line[0]): # Ignore title
continue
if re.match("^ +$", line[0]): # Ignore empty lines
continue
if re.match("^ +$", line[2]): # Ignore lines with empty years (sigh)
continue
# Ignore data before 2011, the one we have is crappy
year = line[2].strip()
if (int(year) < 2011):
continue
# Retrieve entity id
if level == 'municipio':
entity_name = line[83] # not really used, but handy
entity_id = line[84]
else:
entity_name = line[81] # not really used, but handy
entity_id = line[82]
if ( len(entity_id)<2 ): # Zero padding county codes
entity_id = "0"+entity_id
# Retrieve economic data
income_budget = line[3:12]
expense_budget = line[12:21]
income_actual = line[21:30]
expense_actual = line[30:39]
# Convert it to the XBRL data format, and store it for further processing.
# Note that we only use this data if the more detailed one coming via XBRL
# (which we have loaded already in this very same script) does not exist.
budget_id = AragonBulkBudgetLoader.BudgetId(entity_id, year)
if not budget_id in budget_items:
budget_items[budget_id] = []
budget_items[budget_id].extend(self.non_xbrl_summary_as_lines(income_budget, 'I', 'PRESUPUESTO'))
budget_items[budget_id].extend(self.non_xbrl_summary_as_lines(expense_budget, 'G', 'PRESUPUESTO'))
budget_items[budget_id].extend(self.non_xbrl_summary_as_lines(income_actual, 'I', 'LIQUIDACION'))
budget_items[budget_id].extend(self.non_xbrl_summary_as_lines(expense_actual, 'G', 'LIQUIDACION'))
# Keep track of what we have
AragonBulkBudgetLoader.has_actual_data[budget_id] = True
AragonBulkBudgetLoader.has_budget_data[budget_id] = True
# Convert a non-XBRL one-line summary into an array of lines matching the format of
# the XBRL data files
def non_xbrl_summary_as_lines(self, data_items, is_expense, is_actual):
lines = []
for index, line in enumerate(data_items):
chapter = str(index+1)
lines.append([
None, # year, not used onwards
is_expense,
is_actual,
None, # entity id, not used
None, # entity name, not used
chapter,
"", # article
"", # concept
"", # subconcept
"Capítulo "+chapter,
"",
data_items[index], # amount for budget lines
data_items[index] # amount for execution budget lines
])
return lines
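    # Worked example for non_xbrl_summary_as_lines (editor addition): a 9-element
    # income row like ['10', '20', ..., '90'] with is_expense='I' and
    # is_actual='PRESUPUESTO' yields nine pseudo-XBRL lines, one per chapter 1..9,
    # each labelled "Capítulo N" and carrying the same amount in both amount columns,
    # so it can flow through the same loading path as the detailed XBRL data.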
def load_budget(self, level, path, entity_id, year, items):
# Find the public body the budget relates to
entity = Entity.objects.filter(level=level, code=entity_id)
if not entity:
raise Exception("Entity (%s/%s) not found" % (level, entity_id))
else:
entity = entity[0]
print u"Cargando presupuesto para entidad '%s' año %s..." % (entity.name, year)
# Check whether the budget exists already
budget = Budget.objects.filter(entity=entity, year=year)
if not budget:
# Create the budget if needed in the database
budget = Budget(entity=entity, year=year)
budget.save()
# Load the economic and functional classification from a manually edited file
# XXX: Could we share this across budgets?
self.load_economic_classification(path, budget)
self.load_functional_classification(path, budget)
else:
budget = budget[0]
# Delete previous budget for the given entity/year if it exists
budget_id = AragonBulkBudgetLoader.BudgetId(entity_id, year)
if budget_id in AragonBulkBudgetLoader.has_budget_data:
BudgetItem.objects.filter(budget=budget, actual=False).delete()
if budget_id in AragonBulkBudgetLoader.has_actual_data:
BudgetItem.objects.filter(budget=budget, actual=True).delete()
# Process the budget item
# We can't just go ahead and store it, since the incoming data has subtotals, so we
# need to avoid double counting amounts
budget_items = {}
for item in items:
dimension = self._get_dimension(item)
is_expense = (item[1].strip() == 'G' or dimension == 'Functional')
is_actual = self._is_actual(item)
chapter = item[5].strip()
article = item[6].strip()
concept = item[7].strip()
subconcept = item[8].strip()
description = item[9].strip()
amount = self._get_amount(item)
if amount == 0:
continue
# We are missing the area value in the functional data (almost always, there's 'P' instead)
if dimension == 'Functional' and chapter == 'P':
description = description.replace(" =", " ").strip() # Sigh, random extra '=' in input
chapter = AragonBulkBudgetLoader.functional_areas.get(description.lower().replace(' ',''),'')
if chapter=='':
print u"ALERTA: No se encuentra el area funcional para '%s': %s€" % (description.decode("utf8"), amount)
continue
uid = AragonBulkBudgetLoader.Uid(dimension, is_expense, is_actual, chapter, article, concept, subconcept)
self.keep_budget_item(budget_items, uid, description, amount)
self.load_budget_items(budget, budget_items)
def load_budget_items(self, budget, budget_items):
# Since the incoming data is not fully classified along the four dimensions we defined
# for the main budget (Aragón, the good one), we are forced to assign the items a
# catch-all fake category. (Leaving the category blank would be another possibility,
# but we'd have to modify the DB structure for that, and also our breakdown queries,
# so I'm going this slightly hackier way first.)
        # Having null values means items won't show up when breaking down along those
        # levels. Be careful though: some fields are not nullable, so I'm putting an 'X'
        # there; we should check whether they could be made nullable.
dummy_ic = InstitutionalCategory( institution='X',
section=None,
department=None,
description='Desconocido',
budget=budget)
dummy_ic.save()
dummy_ec = EconomicCategory(expense=True, # True/False doesn't really matter
chapter='X',
article=None,
heading=None,
subheading=None,
description='Desconocido',
budget=budget)
dummy_ec.save()
dummy_fc = FunctionalCategory( area='X',
policy=None,
function=None,
programme=None,
description='Desconocido',
budget=budget)
dummy_fc.save()
dummy_fdc = FundingCategory(expense=True, # True/False doesn't really matter
source='X',
fund_class=None,
fund=None,
description='Desconocido',
budget=budget)
dummy_fdc.save()
# Store data in the database
budgeted_income = 0
budgeted_expense = 0
for uid in budget_items:
item = budget_items[uid]
if item.amount == 0: # Can happen at this point, for subtotals, now deduplicated
continue
# Check whether budget income and expense match
if uid.dimension == 'Economic' and not uid.is_actual:
if uid.is_expense:
budgeted_expense += item.amount
else:
budgeted_income += item.amount
# Sometimes we get functional data, sometimes economic
ec = dummy_ec
fc = dummy_fc
description = item.description
if uid.dimension == 'Economic':
ec = EconomicCategory.objects.filter(expense=uid.is_expense,
chapter=uid.chapter if uid.chapter != "" else None,
article=uid.chapter+uid.article if uid.article != "" else None,
heading=uid.chapter+uid.article+uid.concept if uid.concept != "" else None,
subheading=uid.chapter+uid.article+uid.concept+uid.subconcept if uid.subconcept != "" else None,
budget=budget)
if not ec:
print u"ALERTA: No se encuentra la categoría económica '%s' para '%s': %s€" % (uid, item.description.decode("utf8"), item.amount)
continue
else:
ec = ec[0]
# Replace the ugly input descriptions with the manually-curated ones.
# (This makes sense because each line in the input is a different economic category,
# in a way we only have headings in the input, not items.)
description = ec.description
else:
fc = FunctionalCategory.objects.filter(area=uid.chapter if uid.chapter != "" else None,
policy=uid.chapter+uid.article if uid.article != "" else None,
function=uid.chapter+uid.article+uid.concept if uid.concept != "" else None,
programme=uid.chapter+uid.article+uid.concept+uid.subconcept if uid.subconcept != "" else None,
budget=budget)
if not fc:
print u"ALERTA: No se encuentra la categoría económica '%s' para '%s': %s€" % (uid, item.description.decode("utf8"), item.amount)
continue
else:
fc = fc[0]
# Replace the ugly input descriptions with the manually-curated ones.
# (This makes sense because each line in the input is a different functional category,
# in a way we only have headings in the input, not items.)
description = fc.description
BudgetItem(institutional_category=dummy_ic,
functional_category=fc,
economic_category=ec,
funding_category=dummy_fdc,
expense=uid.is_expense,
actual=uid.is_actual,
amount=item.amount,
description=description,
budget=budget).save()
if budgeted_income != budgeted_expense:
print " Info: los ingresos y gastos del presupuesto no coinciden %0.2f <> %0.2f" % (budgeted_income/100.0, budgeted_expense/100.0)
# Keep track of a new found budget item, do some validation and
# amend parent categories if needed
def keep_budget_item(self, items, uid, description, amount):
if uid in items:
print "ALERTA: concepto repetido (%s). Tenía %s, ahora %s." % (uid, items[uid], amount)
return
# Add the item
items[uid] = AragonBulkBudgetLoader.Item(description, amount)
# Remove parent data, since the input data contains subtotals *sigh*
if uid.subconcept != '':
uid = AragonBulkBudgetLoader.Uid(uid.dimension, uid.is_expense, uid.is_actual, uid.chapter, uid.article, uid.concept, '')
if uid in items:
newAmount = AragonBulkBudgetLoader.Item(items[uid].description, items[uid].amount-amount)
# Negative amounts are usually (always?) sign of invalid data. Alert about it, but go on
if newAmount.amount < 0:
print " Info: cantidad negativa '%s': %s" % (newAmount.description, newAmount.amount/100)
items[uid] = newAmount
else:
print " Info: Falta el subtotal para '%s': %s" % (description, amount/100)
else:
if uid.concept != '':
uid = AragonBulkBudgetLoader.Uid(uid.dimension, uid.is_expense, uid.is_actual, uid.chapter, uid.article, '', '')
if uid in items:
newAmount = AragonBulkBudgetLoader.Item(items[uid].description, items[uid].amount-amount)
if newAmount.amount < 0:
print " Info: cantidad negativa '%s': %s" % (newAmount.description, newAmount.amount/100)
items[uid] = newAmount
else:
print " Info: Falta el subtotal para '%s': %s" % (description, amount/100)
else:
if uid.article != '':
uid = AragonBulkBudgetLoader.Uid(uid.dimension, uid.is_expense, uid.is_actual, uid.chapter, '', '', '')
if uid in items:
newAmount = AragonBulkBudgetLoader.Item(items[uid].description, items[uid].amount-amount)
if newAmount.amount < 0:
print " Info: cantidad negativa '%s': %s" % (newAmount.description, newAmount.amount/100)
items[uid] = newAmount
else:
print " Info: Falta el subtotal para '%s': %s" % (description, amount/100)
# Load a manually improved version of the economic categories classification
def load_economic_classification(self, path, budget):
reader = csv.reader(open(os.path.join(path, '..', '..', 'clasificacion_economica.csv'), 'rb'))
for index, line in enumerate(reader):
if re.match("^#", line[0]): # Ignore comments
continue
is_expense = (line[0] != 'I')
chapter = line[1]
article = line[2]
concept = line[3]
subconcept = line[4]
# We're slowly building our 'manually tuned' descriptions next to original ones
description = line[6] if len(line) > 6 and line[6] != "" else line[5]
ec = EconomicCategory( expense=is_expense,
chapter=chapter if chapter != "" else None,
article=chapter+article if article != "" else None,
heading=chapter+article+concept if concept != "" else None,
subheading=chapter+article+concept+subconcept if subconcept != "" else None,
description=description,
budget=budget)
ec.save()
# Load a manually improved version of the functional categories classification.
def load_functional_classification(self, path, budget):
reader = csv.reader(open(os.path.join(path, '..', '..', 'areas_funcionales.csv'), 'rb'))
for index, line in enumerate(reader):
if re.match("^#", line[0]): # Ignore comments
continue
area = line[0]
policy = line[1]
group = line[2]
# We're slowly building our 'manually tuned' descriptions next to original ones
description = line[4] if len(line) > 4 and line[4] != "" else line[3]
fc = FunctionalCategory(area=area if area != "" else None,
policy=area+policy if policy != "" else None,
function=area+policy+group if group != "" else None,
description=description,
budget=budget)
fc.save()
def load_functional_area_fix(self, path):
reader = csv.reader(open(os.path.join(path, '..', '..', 'areas_funcionales.csv'), 'rb'))
for index, line in enumerate(reader):
if re.match("^#", line[0]): # Ignore comments
continue
area = line[0]
# Unfortunately the data we currently have is missing the area column, so we've rebuilt it
# using this budget we found in Google :/ and need to jump through some extra hoops.
# http://www.ayora.es/ayuntamiento/index.php/ayuntamiento/hacienda/presupuestos-municipales-2013/doc_download/376-pg1307-estado-de-gastos-clasificacion-por-programas-desglose-por-partidas
AragonBulkBudgetLoader.functional_areas[line[3].lower()] = area
    # Read a number in Spanish format (123.456,78) and return it as a number of cents.
    # Note: I used to convert to float and multiply by 100, but that would end up a few cents off
    # (in a 5000-million-€ budget). We now instead check for a comma and, based on that, multiply
    # by 100 or not, always working with integers.
    # TODO: Duplicated in budget_loader. Refactor
def _read_spanish_number(self, s):
# Some fields are blank in the municipal non-XBRL budget data
if (s.strip()==""):
return 0
comma = s.find(',')
if (comma>0 and comma < len(s) - 3): # More than two significant positions. Alert, shouldn't happen
print u"ALERTA: Demasiados decimales en '%s'. Ignorando..." % (s)
return 0
if (comma>0 and comma == len(s) - 3):
return int(s.replace('.', '').replace(',', ''))
else:
if (comma>0 and comma == len(s) - 2):
return int(s.replace('.', '').replace(',', '')) * 10
else: # No comma, or trailing comma (yes, it happens)
return int(s.replace('.', '')) * 100
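    # Worked examples for _read_spanish_number (editor addition): the return value is
    # always an integer amount of cents, so "1.234,56" -> 123456, "1.234,5" -> 123450
    # and "1.234" (no comma) -> 123400, while an input with more than two decimals is
    # reported and read as 0.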
# Returns the classification dimension of a given budget line: economic or functional
def _get_dimension(self, item):
return 'Functional' if item[1].strip() == 'F' else 'Economic'
# Whether a budget line refers to a projected (budget) or actual amount (execution)
def _is_actual(self, item):
return (item[2].strip() == 'LIQUIDACION')
# Get the amount for a budget line, trickier than you may think
def _get_amount(self, item):
if self._get_dimension(item) == 'Functional':
# Add all columns, except the last two: the last one is always zero, and the next
# to last contains some numbers whose meaning at this point is unclear (!?).
# If we remove the next to last column, then the economic and functional
# breakdowns on the expense side match. Good news.
amount = 0
for i in range(11, len(item)-2):
if item[i]:
amount += self._read_spanish_number(item[i])
else:
if self._is_actual(item):
# So I figured out the first column in execution data is the final budget
amount = self._read_spanish_number(item[12])
else:
amount = self._read_spanish_number(item[11])
return amount
| gpl-2.0 | -1,430,959,750,375,316,000 | 48.299595 | 199 | 0.547261 | false |
apache/airflow | airflow/example_dags/tutorial_taskflow_api_etl_virtualenv.py | 2 | 3481 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# [START tutorial]
# [START import_module]
import json
from airflow.decorators import dag, task
from airflow.utils.dates import days_ago
# [END import_module]
# [START default_args]
# These args will get passed on to each operator
# You can override them on a per-task basis during operator initialization
default_args = {
'owner': 'airflow',
}
# [END default_args]
# [START instantiate_dag]
@dag(default_args=default_args, schedule_interval=None, start_date=days_ago(2), tags=['example'])
def tutorial_taskflow_api_etl_virtualenv():
"""
### TaskFlow API Tutorial Documentation
This is a simple ETL data pipeline example which demonstrates the use of
the TaskFlow API using three simple tasks for Extract, Transform, and Load.
Documentation that goes along with the Airflow TaskFlow API tutorial is
located
[here](https://airflow.apache.org/docs/apache-airflow/stable/tutorial_taskflow_api.html)
"""
# [END instantiate_dag]
# [START extract_virtualenv]
@task.virtualenv(
use_dill=True,
system_site_packages=False,
requirements=['funcsigs'],
)
def extract():
"""
#### Extract task
A simple Extract task to get data ready for the rest of the data
pipeline. In this case, getting data is simulated by reading from a
hardcoded JSON string.
"""
data_string = '{"1001": 301.27, "1002": 433.21, "1003": 502.22}'
order_data_dict = json.loads(data_string)
return order_data_dict
# [END extract_virtualenv]
# [START transform_docker]
@task.virtualenv(multiple_outputs=True)
def transform(order_data_dict: dict):
"""
#### Transform task
A simple Transform task which takes in the collection of order data and
computes the total order value.
"""
total_order_value = 0
for value in order_data_dict.values():
total_order_value += value
return {"total_order_value": total_order_value}
# [END transform_docker]
# [START load]
@task()
def load(total_order_value: float):
"""
#### Load task
A simple Load task which takes in the result of the Transform task and
instead of saving it to end user review, just prints it out.
"""
print(f"Total order value is: {total_order_value:.2f}")
# [END load]
# [START main_flow]
order_data = extract()
order_summary = transform(order_data)
load(order_summary["total_order_value"])
# [END main_flow]
# [START dag_invocation]
tutorial_etl_dag = tutorial_taskflow_api_etl_virtualenv()
# [END dag_invocation]
# [END tutorial]
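# --- Illustrative usage note (editor addition) ---
# Once this file is on the scheduler's DAG path, the DAG can be exercised without a
# running scheduler through the CLI, e.g. (assuming an Airflow 2.x installation):
#
#     airflow dags test tutorial_taskflow_api_etl_virtualenv 2021-01-01
#
# The execution date is arbitrary here since schedule_interval is None.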
| apache-2.0 | -4,492,695,085,183,687,700 | 30.080357 | 97 | 0.675381 | false |
borisd13/GridCompute | source/admin/database_management.py | 1 | 2862 | '''This module contains administrator functions for database management.'''
# Copyright 2014 Boris Dayma
#
# This file is part of GridCompute.
#
# GridCompute is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# GridCompute is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCompute. If not, see <http://www.gnu.org/licenses/>.
#
# For any question, please contact Boris Dayma at [email protected]
import pymongo
def set_up_mongodb_server(mongodb_server, login, password, versions):
'''Sets up a mongodb server for GridCompute.
Mongo database "gridcompute" is initialized and the "versions" collection is created to specify
the program versions that are authorized by the database.
The "gridcompute" database must be present on the server. Any collection in it will be removed.
Args:
mongodb_server: Address of the mongo instance including connection port containing
*gridcompute* database like ``mongodbserver.com:888`` or ``10.0.0.1:888``
or ``Machine123:888``
login: Login used to connect on mongo database.
password: Password used to connect on mongo database.
versions: List of versions of gridcompute that the mongo database recognizes defined by:
- _id: version number (ex: '0.1').
- status: either "allowed", "warning" or "refused".
- message: message to be displayed when status is not "allowed" like::
[{'_id':'0.1', status:"warning", message:"Beta version},
{'_id':'1.0', status:"allowed"}]
'''
# create new connection
mongodb = pymongo.MongoClient('{}'.format(mongodb_server)).gridcompute
mongodb.authenticate(login, password)
# drop all previous collections
for collection in mongodb.collection_names(False):
mongodb.drop_collection(collection)
# create "versions" collection and populate it
mongodb['versions'].insert(versions)
if __name__ == "__main__":
# Define variables of mongodb server
mongodb_server = 'localhost:27017'
login, password = 'default_grid', 'gridcompute'
versions = [{'_id':'0.2', 'status':'warning', 'message':'This is a beta version used for test purposes only'}]
# Set up MongoDB server
set_up_mongodb_server(mongodb_server, login, password, versions)
| gpl-3.0 | -2,417,606,905,046,536,000 | 39.478261 | 114 | 0.662823 | false |
NicolasLM/spinach | spinach/contrib/sentry_sdk_spinach.py | 1 | 2397 | from sentry_sdk.hub import Hub
from sentry_sdk.integrations import Integration
from spinach import signals
class SpinachIntegration(Integration):
"""Register the Sentry SDK integration.
Exceptions making jobs fail are sent to Sentry and performance
tracing of Spinach tasks is enabled.
:param send_retries: whether to also send to Sentry exceptions resulting
in a job being retried
"""
identifier = 'spinach'
def __init__(self, send_retries: bool=False):
self.send_retries = send_retries
@staticmethod
def setup_once():
signals.job_started.connect(_job_started)
signals.job_finished.connect(_job_finished)
signals.job_failed.connect(_job_failed)
signals.job_schedule_retry.connect(_job_schedule_retry)
def _job_started(namespace, job, **kwargs):
hub = Hub.current
# Scopes are for error reporting
hub.push_scope()
with hub.configure_scope() as scope:
scope.transaction = job.task_name
scope.clear_breadcrumbs()
for attr in job.__slots__:
scope.set_extra(attr, getattr(job, attr))
# Transactions and spans are for tracing
transaction = hub.start_transaction(
op='task',
name=job.task_name
)
    # Transactions are meant to be used as a context manager,
    # but this does not fit the signals-based approach well, so
    # pretend that we use a context manager.
transaction.__enter__()
def _job_finished(namespace, job, **kwargs):
hub = Hub.current
with hub.configure_scope() as scope:
for attr in job.__slots__:
scope.set_extra(attr, getattr(job, attr))
hub.scope.transaction.__exit__(None, None, None)
hub.pop_scope_unsafe()
def _job_failed(namespace, job, **kwargs):
hub = Hub.current
with hub.configure_scope() as scope:
for attr in job.__slots__:
scope.set_extra(attr, getattr(job, attr))
hub.capture_exception()
hub.scope.transaction.set_status("internal_error")
def _job_schedule_retry(namespace, job, **kwargs):
hub = Hub.current
with hub.configure_scope() as scope:
for attr in job.__slots__:
scope.set_extra(attr, getattr(job, attr))
integration = hub.get_integration(SpinachIntegration)
if integration is None:
return
if integration.send_retries:
hub.capture_exception()
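# --- Illustrative usage sketch (editor addition) ---
# The integration above is activated like any other sentry-sdk integration: pass an
# instance to sentry_sdk.init(). The DSN below is a placeholder value.
#
#     import sentry_sdk
#     sentry_sdk.init(
#         dsn='https://<key>@sentry.example.com/1',
#         traces_sample_rate=1.0,
#         integrations=[SpinachIntegration(send_retries=True)],
#     )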
| bsd-2-clause | 4,695,782,351,937,375,000 | 28.9625 | 76 | 0.659992 | false |
google/clusterfuzz | src/python/tests/core/platforms/android/stack_analyzer_test.py | 1 | 5322 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the stack analyzer module for android specifically."""
import os
import unittest
from base import utils
from crash_analysis.stack_parsing import stack_analyzer
from system import environment
from tests.test_libs import helpers as test_helpers
DATA_DIRECTORY = os.path.join(os.path.dirname(__file__), 'stack_analyzer_data')
TEST_JOB_NAME = 'test'
KERNEL_REPRO = """kernel/build u'c059b39e1caf2b96aa376582eeb93062b43d69d5'
kernel/manifest u'75a64986ab455f8b45087b8ad54db68bcb8988f4'
kernel/private/msm-google u'40e9b2ff3a280a8775cfcd5841e530ce78f94355'
kernel/private/msm-google-extra/audiokernel u'112a618d5b757b0600c69f7385892b3f57ccd93e'
kernel/private/msm-google-modules/data-kernel u'e7210f09d00c91f87b295c7a952f040c73506cc0'
kernel/private/msm-google-modules/fts_touch u'8f6a4e9f5649deff59174ffed1d5c2af196d9f63'
kernel/private/msm-google-modules/qca-wfi-host-cmn u'7d4b05ac12d6a1b5d5247da35ae7e370a2cba07d'
kernel/private/msm-google-modules/qcacld u'0a077b0073c48555d0edb2b9b0510fb883181828'
kernel/private/msm-google-modules/wlan-fw-api u'53d899727e4278f4e9fb46328d740a8fb2d9a493'
kernel/private/tests/patchwork u'204e78fb6d905016bfc16ebe7b64547f388cfdb5'
kernel/tests u'bfef3bb78b23cb3f3f12a6880ecafd5def3b66a5'
platform/external/fff u'c82edb1fc60dc81bd319d9b8d0bee9f8963a6960'
platform/external/googletest u'a037984aea3317260edd1127abb39e30e845bc94'
platform/prebuilts/clang/host/linux-x86 u'4b1f275e6b3826c86f791ae8c4d5ec3563c2fc11'
platform/prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9 u'961622e926a1b21382dba4dd9fe0e5fb3ee5ab7c'
platform/prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9 u'cb7b3ac1b7fdb49474ff68761909934d1142f594'
platform/prebuilts/misc u'15560bb32cdb9b47db48eb4865b736df9708a8fe'
platform/tools/repohooks u'233b8010f7f5e3c544b47c68ffae781860156945'
"""
# pylint: disable=unused-argument
def _mock_symbolize_stacktrace(stacktrace, enable_inline_frames=True):
"""No-op mocked version of symbolize_stacktrace."""
return stacktrace
def _mock_fetch_artifact_get(bid,
target,
regex,
output_directory,
output_filename_override=None):
if output_filename_override:
artifact_path = os.path.join(output_directory, output_filename_override)
with open(artifact_path, 'w') as artifact_file:
artifact_file.write(KERNEL_REPRO)
# pylint: enable=unused-argument
class AndroidStackAnalyzerTest(unittest.TestCase):
"""Android specific Stack analyzer tests."""
def setUp(self):
test_helpers.patch_environ(self)
test_helpers.patch(self, [
'crash_analysis.stack_parsing.stack_symbolizer.symbolize_stacktrace',
'metrics.logs.log_error',
])
os.environ['JOB_NAME'] = TEST_JOB_NAME
self.mock.symbolize_stacktrace.side_effect = _mock_symbolize_stacktrace
def _mock_read_data_from_file(self, file_path, eval_data=True, default=None):
if file_path.endswith('repo.prop'):
return self._real_read_data_from_file(file_path, eval_data, default)
return None
def _read_test_data(self, name):
"""Helper function to read test data."""
with open(os.path.join(DATA_DIRECTORY, name)) as handle:
return handle.read()
def test_syzkaller_kasan_android_with_env(self):
"""Test syzkaller kasan."""
environment.set_value('OS_OVERRIDE', 'ANDROID_KERNEL')
environment.set_bot_environment()
self._real_read_data_from_file = utils.read_data_from_file
test_helpers.patch(self, [
'platforms.android.fetch_artifact.get',
'platforms.android.kernel_utils.get_kernel_hash_and_build_id',
'platforms.android.kernel_utils.get_kernel_name',
'platforms.android.settings.get_product_brand',
'google_cloud_utils.storage.get_file_from_cache_if_exists',
'google_cloud_utils.storage.store_file_in_cache',
'base.utils.write_data_to_file', 'base.utils.read_data_from_file'
])
self.mock.get.side_effect = _mock_fetch_artifact_get
self.mock.get_kernel_hash_and_build_id.return_value = '40e9b2ff3a2', '12345'
self.mock.get_kernel_name.return_value = 'device_kernel'
self.mock.get_product_brand.return_value = 'google'
self.mock.get_file_from_cache_if_exists.return_value = False
self.mock.store_file_in_cache.return_value = None
self.mock.write_data_to_file = None
self.mock.read_data_from_file.side_effect = self._mock_read_data_from_file
data = self._read_test_data('kasan_syzkaller_android.txt')
expected_stack = self._read_test_data(
'kasan_syzkaller_android_linkified.txt')
actual_state = stack_analyzer.get_crash_data(data)
self.assertEqual(actual_state.crash_stacktrace, expected_stack)
| apache-2.0 | -1,095,052,181,054,162,400 | 43.722689 | 110 | 0.752537 | false |
google/clusterfuzz | src/python/bot/untrusted_runner/tasks_impl.py | 1 | 6716 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tasks RPC implementations."""
from google.protobuf import wrappers_pb2
from google.protobuf.any_pb2 import Any
import six
from bot import testcase_manager
from bot.tasks import corpus_pruning_task
from bot.tasks import fuzz_task
from bot.tasks import minimize_task
from datastore import data_types
from lib.clusterfuzz.fuzz import engine
from protos import untrusted_runner_pb2
def _proto_to_fuzz_target(proto):
"""Convert protobuf to FuzzTarget."""
return data_types.FuzzTarget(
engine=proto.engine, project=proto.project, binary=proto.binary)
def _proto_to_cross_pollinate_fuzzer(proto):
"""Convert protobuf to CrossPollinateFuzzer."""
return corpus_pruning_task.CrossPollinateFuzzer(
fuzz_target=_proto_to_fuzz_target(proto.fuzz_target),
backup_bucket_name=proto.backup_bucket_name,
corpus_engine_name=proto.corpus_engine_name)
def prune_corpus(request, _):
"""Prune corpus."""
context = corpus_pruning_task.Context(
_proto_to_fuzz_target(request.fuzz_target), [
_proto_to_cross_pollinate_fuzzer(proto)
for proto in request.cross_pollinate_fuzzers
])
result = corpus_pruning_task.do_corpus_pruning(
context, request.last_execution_failed, request.revision)
cross_pollination_stats = None
if result.cross_pollination_stats:
cross_pollination_stats = untrusted_runner_pb2.CrossPollinationStats(
project_qualified_name=result.cross_pollination_stats.
project_qualified_name,
method=result.cross_pollination_stats.method,
sources=result.cross_pollination_stats.sources,
tags=result.cross_pollination_stats.tags,
initial_corpus_size=result.cross_pollination_stats.initial_corpus_size,
corpus_size=result.cross_pollination_stats.corpus_size,
initial_edge_coverage=result.cross_pollination_stats.
initial_edge_coverage,
edge_coverage=result.cross_pollination_stats.edge_coverage,
initial_feature_coverage=result.cross_pollination_stats.
initial_feature_coverage,
feature_coverage=result.cross_pollination_stats.feature_coverage)
# Intentionally skip edge and function coverage values as those would come
# from fuzzer coverage cron task (see src/go/server/cron/coverage.go).
coverage_info = untrusted_runner_pb2.CoverageInfo(
corpus_size_units=result.coverage_info.corpus_size_units,
corpus_size_bytes=result.coverage_info.corpus_size_bytes,
corpus_location=result.coverage_info.corpus_location,
corpus_backup_location=result.coverage_info.corpus_backup_location,
quarantine_size_units=result.coverage_info.quarantine_size_units,
quarantine_size_bytes=result.coverage_info.quarantine_size_bytes,
quarantine_location=result.coverage_info.quarantine_location)
crashes = [
untrusted_runner_pb2.CorpusCrash(
crash_state=crash.crash_state,
crash_type=crash.crash_type,
crash_address=crash.crash_address,
crash_stacktrace=crash.crash_stacktrace,
unit_path=crash.unit_path,
security_flag=crash.security_flag,
) for crash in result.crashes
]
return untrusted_runner_pb2.PruneCorpusResponse(
coverage_info=coverage_info,
crashes=crashes,
fuzzer_binary_name=result.fuzzer_binary_name,
revision=result.revision,
cross_pollination_stats=cross_pollination_stats)
def process_testcase(request, _):
"""Process testcase."""
tool_name_map = {
untrusted_runner_pb2.ProcessTestcaseRequest.MINIMIZE: 'minimize',
untrusted_runner_pb2.ProcessTestcaseRequest.CLEANSE: 'cleanse',
}
# TODO(ochang): Support other engines.
assert request.engine == 'libFuzzer'
assert request.operation in tool_name_map
result = minimize_task.run_libfuzzer_engine(
tool_name_map[request.operation], request.target_name, request.arguments,
request.testcase_path, request.output_path, request.timeout)
return untrusted_runner_pb2.EngineReproduceResult(
return_code=result.return_code,
time_executed=result.time_executed,
output=result.output)
def _pack_values(values):
"""Pack protobuf values."""
packed = {}
if values is None:
return packed
for key, value in six.iteritems(values):
packed_value = Any()
if isinstance(value, float):
packed_value.Pack(wrappers_pb2.DoubleValue(value=value))
elif isinstance(value, six.integer_types):
packed_value.Pack(wrappers_pb2.Int64Value(value=value))
elif isinstance(value, six.string_types):
packed_value.Pack(wrappers_pb2.StringValue(value=value))
else:
raise ValueError('Unknown stat type for ' + key)
packed[key] = packed_value
return packed
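# Worked example for _pack_values (editor addition): a stats dict such as
# {'execs_per_sec': 103.5, 'new_units': 4, 'strategy': 'value_profile'} (illustrative
# keys) becomes a dict of protobuf Any messages wrapping DoubleValue, Int64Value and
# StringValue respectively; the receiving side recovers them with Any.Unpack() into
# the matching wrapper type.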
def engine_fuzz(request, _):
"""Run engine fuzzer."""
engine_impl = engine.get(request.engine)
result, fuzzer_metadata, strategies = fuzz_task.run_engine_fuzzer(
engine_impl, request.target_name, request.sync_corpus_directory,
request.testcase_directory)
crashes = [
untrusted_runner_pb2.EngineCrash(
input_path=crash.input_path,
stacktrace=crash.stacktrace,
reproduce_args=crash.reproduce_args,
crash_time=crash.crash_time) for crash in result.crashes
]
packed_stats = _pack_values(result.stats)
packed_strategies = _pack_values(strategies)
return untrusted_runner_pb2.EngineFuzzResponse(
logs=result.logs,
command=result.command,
crashes=crashes,
stats=packed_stats,
time_executed=result.time_executed,
fuzzer_metadata=fuzzer_metadata,
strategies=packed_strategies)
def engine_reproduce(request, _):
"""Run engine reproduce."""
engine_impl = engine.get(request.engine)
result = testcase_manager.engine_reproduce(engine_impl, request.target_name,
request.testcase_path,
request.arguments, request.timeout)
return untrusted_runner_pb2.EngineReproduceResult(
command=result.command,
return_code=result.return_code,
time_executed=result.time_executed,
output=result.output)
| apache-2.0 | 1,779,452,652,457,560,600 | 35.901099 | 80 | 0.717391 | false |
fcurella/cookiejar | setup.py | 1 | 1555 | import os
import sys
from setuptools import setup, find_packages
def read(fname):
try:
with open(os.path.join(os.path.dirname(__file__), fname)) as fh:
return fh.read()
except IOError:
return ''
requirements = read('REQUIREMENTS').splitlines()
tests_requirements = read('TEST-REQUIREMENTS').splitlines()
packages = find_packages(exclude=['tests'])
# Avoid byte-compiling the shipped template
sys.dont_write_bytecode = True
setup(
name="cookiejar",
version="0.0.3",
description="Cookiecutter templates discovery and management.",
long_description=read('README.rst'),
url='https://github.com/fcurella/cookiejar',
license='MIT',
author='Flavio Curella',
author_email='[email protected]',
packages=packages,
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
scripts=['bin/cookiejar'],
install_requires=requirements,
tests_require=tests_requirements,
)
| mit | -8,612,719,314,905,511,000 | 29.490196 | 72 | 0.639871 | false |
hkarl/svpb | svpb/wsgi.py | 1 | 1413 | """
WSGI config for svpb project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "svpb.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "svpb.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| apache-2.0 | 8,092,257,263,339,219,000 | 43.15625 | 79 | 0.791932 | false |
ncbray/pystream | sandbox/interface_vec.py | 1 | 22906 | # Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from shader.vec import *
from decl import *
cls = class_(vec2)
cls.slot('x', float)
cls.slot('y', float)
cls.method('__init__', float, float)
cls.method('__repr__')
cls.method('__float__')
cls.method('dot', vec2)
cls.method('length')
cls.method('distance', vec2)
cls.method('normalize')
cls.method('cross', vec2)
cls.method('mix', vec2, (vec2, float))
cls.method('reflect', vec2)
cls.method('refract', vec2, float)
cls.method('exp')
cls.method('log')
cls.method('__pos__')
cls.method('__neg__')
cls.method('__abs__')
cls.method('__add__', (vec2, float))
cls.method('__radd__', float)
cls.method('__sub__', (vec2, float))
cls.method('__rsub__', float)
cls.method('__mul__', (vec2, float))
cls.method('__rmul__', float)
cls.method('__div__', (vec2, float))
cls.method('__rdiv__', float)
cls.method('__pow__', (vec2, float))
cls.method('__rpow__', float)
cls.method('min', (vec2, float))
cls.method('rmin', float)
cls.method('max', (vec2, float))
cls.method('rmax', float)
cls.getter('r')
cls.getter('xx')
cls.getter('rr')
cls.getter('xxx')
cls.getter('rrr')
cls.getter('xxxx')
cls.getter('rrrr')
cls.getter('xxxy')
cls.getter('rrrg')
cls.getter('xxy')
cls.getter('rrg')
cls.getter('xxyx')
cls.getter('rrgr')
cls.getter('xxyy')
cls.getter('rrgg')
cls.getter('xy')
cls.getter('rg')
cls.getter('xyx')
cls.getter('rgr')
cls.getter('xyxx')
cls.getter('rgrr')
cls.getter('xyxy')
cls.getter('rgrg')
cls.getter('xyy')
cls.getter('rgg')
cls.getter('xyyx')
cls.getter('rggr')
cls.getter('xyyy')
cls.getter('rggg')
cls.getter('g')
cls.getter('yx')
cls.getter('gr')
cls.getter('yxx')
cls.getter('grr')
cls.getter('yxxx')
cls.getter('grrr')
cls.getter('yxxy')
cls.getter('grrg')
cls.getter('yxy')
cls.getter('grg')
cls.getter('yxyx')
cls.getter('grgr')
cls.getter('yxyy')
cls.getter('grgg')
cls.getter('yy')
cls.getter('gg')
cls.getter('yyx')
cls.getter('ggr')
cls.getter('yyxx')
cls.getter('ggrr')
cls.getter('yyxy')
cls.getter('ggrg')
cls.getter('yyy')
cls.getter('ggg')
cls.getter('yyyx')
cls.getter('gggr')
cls.getter('yyyy')
cls.getter('gggg')
cls = class_(vec3)
cls.slot('x', float)
cls.slot('y', float)
cls.slot('z', float)
cls.method('__init__', float, float, float)
cls.method('__repr__')
cls.method('__float__')
cls.method('dot', vec3)
cls.method('length')
cls.method('distance', vec3)
cls.method('normalize')
cls.method('cross', vec3)
cls.method('mix', vec3, (vec3, float))
cls.method('reflect', vec3)
cls.method('refract', vec3, float)
cls.method('exp')
cls.method('log')
cls.method('__pos__')
cls.method('__neg__')
cls.method('__abs__')
cls.method('__add__', (vec3, float))
cls.method('__radd__', float)
cls.method('__sub__', (vec3, float))
cls.method('__rsub__', float)
cls.method('__mul__', (vec3, float))
cls.method('__rmul__', float)
cls.method('__div__', (vec3, float))
cls.method('__rdiv__', float)
cls.method('__pow__', (vec3, float))
cls.method('__rpow__', float)
cls.method('min', (vec3, float))
cls.method('rmin', float)
cls.method('max', (vec3, float))
cls.method('rmax', float)
cls.getter('r')
cls.getter('xx')
cls.getter('rr')
cls.getter('xxx')
cls.getter('rrr')
cls.getter('xxxx')
cls.getter('rrrr')
cls.getter('xxxy')
cls.getter('rrrg')
cls.getter('xxxz')
cls.getter('rrrb')
cls.getter('xxy')
cls.getter('rrg')
cls.getter('xxyx')
cls.getter('rrgr')
cls.getter('xxyy')
cls.getter('rrgg')
cls.getter('xxyz')
cls.getter('rrgb')
cls.getter('xxz')
cls.getter('rrb')
cls.getter('xxzx')
cls.getter('rrbr')
cls.getter('xxzy')
cls.getter('rrbg')
cls.getter('xxzz')
cls.getter('rrbb')
cls.getter('xy')
cls.getter('rg')
cls.getter('xyx')
cls.getter('rgr')
cls.getter('xyxx')
cls.getter('rgrr')
cls.getter('xyxy')
cls.getter('rgrg')
cls.getter('xyxz')
cls.getter('rgrb')
cls.getter('xyy')
cls.getter('rgg')
cls.getter('xyyx')
cls.getter('rggr')
cls.getter('xyyy')
cls.getter('rggg')
cls.getter('xyyz')
cls.getter('rggb')
cls.getter('xyz')
cls.getter('rgb')
cls.getter('xyzx')
cls.getter('rgbr')
cls.getter('xyzy')
cls.getter('rgbg')
cls.getter('xyzz')
cls.getter('rgbb')
cls.getter('xz')
cls.getter('rb')
cls.getter('xzx')
cls.getter('rbr')
cls.getter('xzxx')
cls.getter('rbrr')
cls.getter('xzxy')
cls.getter('rbrg')
cls.getter('xzxz')
cls.getter('rbrb')
cls.getter('xzy')
cls.getter('rbg')
cls.getter('xzyx')
cls.getter('rbgr')
cls.getter('xzyy')
cls.getter('rbgg')
cls.getter('xzyz')
cls.getter('rbgb')
cls.getter('xzz')
cls.getter('rbb')
cls.getter('xzzx')
cls.getter('rbbr')
cls.getter('xzzy')
cls.getter('rbbg')
cls.getter('xzzz')
cls.getter('rbbb')
cls.getter('g')
cls.getter('yx')
cls.getter('gr')
cls.getter('yxx')
cls.getter('grr')
cls.getter('yxxx')
cls.getter('grrr')
cls.getter('yxxy')
cls.getter('grrg')
cls.getter('yxxz')
cls.getter('grrb')
cls.getter('yxy')
cls.getter('grg')
cls.getter('yxyx')
cls.getter('grgr')
cls.getter('yxyy')
cls.getter('grgg')
cls.getter('yxyz')
cls.getter('grgb')
cls.getter('yxz')
cls.getter('grb')
cls.getter('yxzx')
cls.getter('grbr')
cls.getter('yxzy')
cls.getter('grbg')
cls.getter('yxzz')
cls.getter('grbb')
cls.getter('yy')
cls.getter('gg')
cls.getter('yyx')
cls.getter('ggr')
cls.getter('yyxx')
cls.getter('ggrr')
cls.getter('yyxy')
cls.getter('ggrg')
cls.getter('yyxz')
cls.getter('ggrb')
cls.getter('yyy')
cls.getter('ggg')
cls.getter('yyyx')
cls.getter('gggr')
cls.getter('yyyy')
cls.getter('gggg')
cls.getter('yyyz')
cls.getter('gggb')
cls.getter('yyz')
cls.getter('ggb')
cls.getter('yyzx')
cls.getter('ggbr')
cls.getter('yyzy')
cls.getter('ggbg')
cls.getter('yyzz')
cls.getter('ggbb')
cls.getter('yz')
cls.getter('gb')
cls.getter('yzx')
cls.getter('gbr')
cls.getter('yzxx')
cls.getter('gbrr')
cls.getter('yzxy')
cls.getter('gbrg')
cls.getter('yzxz')
cls.getter('gbrb')
cls.getter('yzy')
cls.getter('gbg')
cls.getter('yzyx')
cls.getter('gbgr')
cls.getter('yzyy')
cls.getter('gbgg')
cls.getter('yzyz')
cls.getter('gbgb')
cls.getter('yzz')
cls.getter('gbb')
cls.getter('yzzx')
cls.getter('gbbr')
cls.getter('yzzy')
cls.getter('gbbg')
cls.getter('yzzz')
cls.getter('gbbb')
cls.getter('b')
cls.getter('zx')
cls.getter('br')
cls.getter('zxx')
cls.getter('brr')
cls.getter('zxxx')
cls.getter('brrr')
cls.getter('zxxy')
cls.getter('brrg')
cls.getter('zxxz')
cls.getter('brrb')
cls.getter('zxy')
cls.getter('brg')
cls.getter('zxyx')
cls.getter('brgr')
cls.getter('zxyy')
cls.getter('brgg')
cls.getter('zxyz')
cls.getter('brgb')
cls.getter('zxz')
cls.getter('brb')
cls.getter('zxzx')
cls.getter('brbr')
cls.getter('zxzy')
cls.getter('brbg')
cls.getter('zxzz')
cls.getter('brbb')
cls.getter('zy')
cls.getter('bg')
cls.getter('zyx')
cls.getter('bgr')
cls.getter('zyxx')
cls.getter('bgrr')
cls.getter('zyxy')
cls.getter('bgrg')
cls.getter('zyxz')
cls.getter('bgrb')
cls.getter('zyy')
cls.getter('bgg')
cls.getter('zyyx')
cls.getter('bggr')
cls.getter('zyyy')
cls.getter('bggg')
cls.getter('zyyz')
cls.getter('bggb')
cls.getter('zyz')
cls.getter('bgb')
cls.getter('zyzx')
cls.getter('bgbr')
cls.getter('zyzy')
cls.getter('bgbg')
cls.getter('zyzz')
cls.getter('bgbb')
cls.getter('zz')
cls.getter('bb')
cls.getter('zzx')
cls.getter('bbr')
cls.getter('zzxx')
cls.getter('bbrr')
cls.getter('zzxy')
cls.getter('bbrg')
cls.getter('zzxz')
cls.getter('bbrb')
cls.getter('zzy')
cls.getter('bbg')
cls.getter('zzyx')
cls.getter('bbgr')
cls.getter('zzyy')
cls.getter('bbgg')
cls.getter('zzyz')
cls.getter('bbgb')
cls.getter('zzz')
cls.getter('bbb')
cls.getter('zzzx')
cls.getter('bbbr')
cls.getter('zzzy')
cls.getter('bbbg')
cls.getter('zzzz')
cls.getter('bbbb')
cls = class_(vec4)
cls.slot('x', float)
cls.slot('y', float)
cls.slot('z', float)
cls.slot('w', float)
cls.method('__init__', float, float, float, float)
cls.method('__repr__')
cls.method('__float__')
cls.method('dot', vec4)
cls.method('length')
cls.method('distance', vec4)
cls.method('normalize')
cls.method('cross', vec4)
cls.method('mix', vec4, (vec4, float))
cls.method('reflect', vec4)
cls.method('refract', vec4, float)
cls.method('exp')
cls.method('log')
cls.method('__pos__')
cls.method('__neg__')
cls.method('__abs__')
cls.method('__add__', (vec4, float))
cls.method('__radd__', float)
cls.method('__sub__', (vec4, float))
cls.method('__rsub__', float)
cls.method('__mul__', (vec4, float))
cls.method('__rmul__', float)
cls.method('__div__', (vec4, float))
cls.method('__rdiv__', float)
cls.method('__pow__', (vec4, float))
cls.method('__rpow__', float)
cls.method('min', (vec4, float))
cls.method('rmin', float)
cls.method('max', (vec4, float))
cls.method('rmax', float)
cls.getter('r')
cls.getter('xx')
cls.getter('rr')
cls.getter('xxx')
cls.getter('rrr')
cls.getter('xxxx')
cls.getter('rrrr')
cls.getter('xxxy')
cls.getter('rrrg')
cls.getter('xxxz')
cls.getter('rrrb')
cls.getter('xxxw')
cls.getter('rrra')
cls.getter('xxy')
cls.getter('rrg')
cls.getter('xxyx')
cls.getter('rrgr')
cls.getter('xxyy')
cls.getter('rrgg')
cls.getter('xxyz')
cls.getter('rrgb')
cls.getter('xxyw')
cls.getter('rrga')
cls.getter('xxz')
cls.getter('rrb')
cls.getter('xxzx')
cls.getter('rrbr')
cls.getter('xxzy')
cls.getter('rrbg')
cls.getter('xxzz')
cls.getter('rrbb')
cls.getter('xxzw')
cls.getter('rrba')
cls.getter('xxw')
cls.getter('rra')
cls.getter('xxwx')
cls.getter('rrar')
cls.getter('xxwy')
cls.getter('rrag')
cls.getter('xxwz')
cls.getter('rrab')
cls.getter('xxww')
cls.getter('rraa')
cls.getter('xy')
cls.getter('rg')
cls.getter('xyx')
cls.getter('rgr')
cls.getter('xyxx')
cls.getter('rgrr')
cls.getter('xyxy')
cls.getter('rgrg')
cls.getter('xyxz')
cls.getter('rgrb')
cls.getter('xyxw')
cls.getter('rgra')
cls.getter('xyy')
cls.getter('rgg')
cls.getter('xyyx')
cls.getter('rggr')
cls.getter('xyyy')
cls.getter('rggg')
cls.getter('xyyz')
cls.getter('rggb')
cls.getter('xyyw')
cls.getter('rgga')
cls.getter('xyz')
cls.getter('rgb')
cls.getter('xyzx')
cls.getter('rgbr')
cls.getter('xyzy')
cls.getter('rgbg')
cls.getter('xyzz')
cls.getter('rgbb')
cls.getter('xyzw')
cls.getter('rgba')
cls.getter('xyw')
cls.getter('rga')
cls.getter('xywx')
cls.getter('rgar')
cls.getter('xywy')
cls.getter('rgag')
cls.getter('xywz')
cls.getter('rgab')
cls.getter('xyww')
cls.getter('rgaa')
cls.getter('xz')
cls.getter('rb')
cls.getter('xzx')
cls.getter('rbr')
cls.getter('xzxx')
cls.getter('rbrr')
cls.getter('xzxy')
cls.getter('rbrg')
cls.getter('xzxz')
cls.getter('rbrb')
cls.getter('xzxw')
cls.getter('rbra')
cls.getter('xzy')
cls.getter('rbg')
cls.getter('xzyx')
cls.getter('rbgr')
cls.getter('xzyy')
cls.getter('rbgg')
cls.getter('xzyz')
cls.getter('rbgb')
cls.getter('xzyw')
cls.getter('rbga')
cls.getter('xzz')
cls.getter('rbb')
cls.getter('xzzx')
cls.getter('rbbr')
cls.getter('xzzy')
cls.getter('rbbg')
cls.getter('xzzz')
cls.getter('rbbb')
cls.getter('xzzw')
cls.getter('rbba')
cls.getter('xzw')
cls.getter('rba')
cls.getter('xzwx')
cls.getter('rbar')
cls.getter('xzwy')
cls.getter('rbag')
cls.getter('xzwz')
cls.getter('rbab')
cls.getter('xzww')
cls.getter('rbaa')
cls.getter('xw')
cls.getter('ra')
cls.getter('xwx')
cls.getter('rar')
cls.getter('xwxx')
cls.getter('rarr')
cls.getter('xwxy')
cls.getter('rarg')
cls.getter('xwxz')
cls.getter('rarb')
cls.getter('xwxw')
cls.getter('rara')
cls.getter('xwy')
cls.getter('rag')
cls.getter('xwyx')
cls.getter('ragr')
cls.getter('xwyy')
cls.getter('ragg')
cls.getter('xwyz')
cls.getter('ragb')
cls.getter('xwyw')
cls.getter('raga')
cls.getter('xwz')
cls.getter('rab')
cls.getter('xwzx')
cls.getter('rabr')
cls.getter('xwzy')
cls.getter('rabg')
cls.getter('xwzz')
cls.getter('rabb')
cls.getter('xwzw')
cls.getter('raba')
cls.getter('xww')
cls.getter('raa')
cls.getter('xwwx')
cls.getter('raar')
cls.getter('xwwy')
cls.getter('raag')
cls.getter('xwwz')
cls.getter('raab')
cls.getter('xwww')
cls.getter('raaa')
cls.getter('g')
cls.getter('yx')
cls.getter('gr')
cls.getter('yxx')
cls.getter('grr')
cls.getter('yxxx')
cls.getter('grrr')
cls.getter('yxxy')
cls.getter('grrg')
cls.getter('yxxz')
cls.getter('grrb')
cls.getter('yxxw')
cls.getter('grra')
cls.getter('yxy')
cls.getter('grg')
cls.getter('yxyx')
cls.getter('grgr')
cls.getter('yxyy')
cls.getter('grgg')
cls.getter('yxyz')
cls.getter('grgb')
cls.getter('yxyw')
cls.getter('grga')
cls.getter('yxz')
cls.getter('grb')
cls.getter('yxzx')
cls.getter('grbr')
cls.getter('yxzy')
cls.getter('grbg')
cls.getter('yxzz')
cls.getter('grbb')
cls.getter('yxzw')
cls.getter('grba')
cls.getter('yxw')
cls.getter('gra')
cls.getter('yxwx')
cls.getter('grar')
cls.getter('yxwy')
cls.getter('grag')
cls.getter('yxwz')
cls.getter('grab')
cls.getter('yxww')
cls.getter('graa')
cls.getter('yy')
cls.getter('gg')
cls.getter('yyx')
cls.getter('ggr')
cls.getter('yyxx')
cls.getter('ggrr')
cls.getter('yyxy')
cls.getter('ggrg')
cls.getter('yyxz')
cls.getter('ggrb')
cls.getter('yyxw')
cls.getter('ggra')
cls.getter('yyy')
cls.getter('ggg')
cls.getter('yyyx')
cls.getter('gggr')
cls.getter('yyyy')
cls.getter('gggg')
cls.getter('yyyz')
cls.getter('gggb')
cls.getter('yyyw')
cls.getter('ggga')
cls.getter('yyz')
cls.getter('ggb')
cls.getter('yyzx')
cls.getter('ggbr')
cls.getter('yyzy')
cls.getter('ggbg')
cls.getter('yyzz')
cls.getter('ggbb')
cls.getter('yyzw')
cls.getter('ggba')
cls.getter('yyw')
cls.getter('gga')
cls.getter('yywx')
cls.getter('ggar')
cls.getter('yywy')
cls.getter('ggag')
cls.getter('yywz')
cls.getter('ggab')
cls.getter('yyww')
cls.getter('ggaa')
cls.getter('yz')
cls.getter('gb')
cls.getter('yzx')
cls.getter('gbr')
cls.getter('yzxx')
cls.getter('gbrr')
cls.getter('yzxy')
cls.getter('gbrg')
cls.getter('yzxz')
cls.getter('gbrb')
cls.getter('yzxw')
cls.getter('gbra')
cls.getter('yzy')
cls.getter('gbg')
cls.getter('yzyx')
cls.getter('gbgr')
cls.getter('yzyy')
cls.getter('gbgg')
cls.getter('yzyz')
cls.getter('gbgb')
cls.getter('yzyw')
cls.getter('gbga')
cls.getter('yzz')
cls.getter('gbb')
cls.getter('yzzx')
cls.getter('gbbr')
cls.getter('yzzy')
cls.getter('gbbg')
cls.getter('yzzz')
cls.getter('gbbb')
cls.getter('yzzw')
cls.getter('gbba')
cls.getter('yzw')
cls.getter('gba')
cls.getter('yzwx')
cls.getter('gbar')
cls.getter('yzwy')
cls.getter('gbag')
cls.getter('yzwz')
cls.getter('gbab')
cls.getter('yzww')
cls.getter('gbaa')
cls.getter('yw')
cls.getter('ga')
cls.getter('ywx')
cls.getter('gar')
cls.getter('ywxx')
cls.getter('garr')
cls.getter('ywxy')
cls.getter('garg')
cls.getter('ywxz')
cls.getter('garb')
cls.getter('ywxw')
cls.getter('gara')
cls.getter('ywy')
cls.getter('gag')
cls.getter('ywyx')
cls.getter('gagr')
cls.getter('ywyy')
cls.getter('gagg')
cls.getter('ywyz')
cls.getter('gagb')
cls.getter('ywyw')
cls.getter('gaga')
cls.getter('ywz')
cls.getter('gab')
cls.getter('ywzx')
cls.getter('gabr')
cls.getter('ywzy')
cls.getter('gabg')
cls.getter('ywzz')
cls.getter('gabb')
cls.getter('ywzw')
cls.getter('gaba')
cls.getter('yww')
cls.getter('gaa')
cls.getter('ywwx')
cls.getter('gaar')
cls.getter('ywwy')
cls.getter('gaag')
cls.getter('ywwz')
cls.getter('gaab')
cls.getter('ywww')
cls.getter('gaaa')
cls.getter('b')
cls.getter('zx')
cls.getter('br')
cls.getter('zxx')
cls.getter('brr')
cls.getter('zxxx')
cls.getter('brrr')
cls.getter('zxxy')
cls.getter('brrg')
cls.getter('zxxz')
cls.getter('brrb')
cls.getter('zxxw')
cls.getter('brra')
cls.getter('zxy')
cls.getter('brg')
cls.getter('zxyx')
cls.getter('brgr')
cls.getter('zxyy')
cls.getter('brgg')
cls.getter('zxyz')
cls.getter('brgb')
cls.getter('zxyw')
cls.getter('brga')
cls.getter('zxz')
cls.getter('brb')
cls.getter('zxzx')
cls.getter('brbr')
cls.getter('zxzy')
cls.getter('brbg')
cls.getter('zxzz')
cls.getter('brbb')
cls.getter('zxzw')
cls.getter('brba')
cls.getter('zxw')
cls.getter('bra')
cls.getter('zxwx')
cls.getter('brar')
cls.getter('zxwy')
cls.getter('brag')
cls.getter('zxwz')
cls.getter('brab')
cls.getter('zxww')
cls.getter('braa')
cls.getter('zy')
cls.getter('bg')
cls.getter('zyx')
cls.getter('bgr')
cls.getter('zyxx')
cls.getter('bgrr')
cls.getter('zyxy')
cls.getter('bgrg')
cls.getter('zyxz')
cls.getter('bgrb')
cls.getter('zyxw')
cls.getter('bgra')
cls.getter('zyy')
cls.getter('bgg')
cls.getter('zyyx')
cls.getter('bggr')
cls.getter('zyyy')
cls.getter('bggg')
cls.getter('zyyz')
cls.getter('bggb')
cls.getter('zyyw')
cls.getter('bgga')
cls.getter('zyz')
cls.getter('bgb')
cls.getter('zyzx')
cls.getter('bgbr')
cls.getter('zyzy')
cls.getter('bgbg')
cls.getter('zyzz')
cls.getter('bgbb')
cls.getter('zyzw')
cls.getter('bgba')
cls.getter('zyw')
cls.getter('bga')
cls.getter('zywx')
cls.getter('bgar')
cls.getter('zywy')
cls.getter('bgag')
cls.getter('zywz')
cls.getter('bgab')
cls.getter('zyww')
cls.getter('bgaa')
cls.getter('zz')
cls.getter('bb')
cls.getter('zzx')
cls.getter('bbr')
cls.getter('zzxx')
cls.getter('bbrr')
cls.getter('zzxy')
cls.getter('bbrg')
cls.getter('zzxz')
cls.getter('bbrb')
cls.getter('zzxw')
cls.getter('bbra')
cls.getter('zzy')
cls.getter('bbg')
cls.getter('zzyx')
cls.getter('bbgr')
cls.getter('zzyy')
cls.getter('bbgg')
cls.getter('zzyz')
cls.getter('bbgb')
cls.getter('zzyw')
cls.getter('bbga')
cls.getter('zzz')
cls.getter('bbb')
cls.getter('zzzx')
cls.getter('bbbr')
cls.getter('zzzy')
cls.getter('bbbg')
cls.getter('zzzz')
cls.getter('bbbb')
cls.getter('zzzw')
cls.getter('bbba')
cls.getter('zzw')
cls.getter('bba')
cls.getter('zzwx')
cls.getter('bbar')
cls.getter('zzwy')
cls.getter('bbag')
cls.getter('zzwz')
cls.getter('bbab')
cls.getter('zzww')
cls.getter('bbaa')
cls.getter('zw')
cls.getter('ba')
cls.getter('zwx')
cls.getter('bar')
cls.getter('zwxx')
cls.getter('barr')
cls.getter('zwxy')
cls.getter('barg')
cls.getter('zwxz')
cls.getter('barb')
cls.getter('zwxw')
cls.getter('bara')
cls.getter('zwy')
cls.getter('bag')
cls.getter('zwyx')
cls.getter('bagr')
cls.getter('zwyy')
cls.getter('bagg')
cls.getter('zwyz')
cls.getter('bagb')
cls.getter('zwyw')
cls.getter('baga')
cls.getter('zwz')
cls.getter('bab')
cls.getter('zwzx')
cls.getter('babr')
cls.getter('zwzy')
cls.getter('babg')
cls.getter('zwzz')
cls.getter('babb')
cls.getter('zwzw')
cls.getter('baba')
cls.getter('zww')
cls.getter('baa')
cls.getter('zwwx')
cls.getter('baar')
cls.getter('zwwy')
cls.getter('baag')
cls.getter('zwwz')
cls.getter('baab')
cls.getter('zwww')
cls.getter('baaa')
cls.getter('a')
cls.getter('wx')
cls.getter('ar')
cls.getter('wxx')
cls.getter('arr')
cls.getter('wxxx')
cls.getter('arrr')
cls.getter('wxxy')
cls.getter('arrg')
cls.getter('wxxz')
cls.getter('arrb')
cls.getter('wxxw')
cls.getter('arra')
cls.getter('wxy')
cls.getter('arg')
cls.getter('wxyx')
cls.getter('argr')
cls.getter('wxyy')
cls.getter('argg')
cls.getter('wxyz')
cls.getter('argb')
cls.getter('wxyw')
cls.getter('arga')
cls.getter('wxz')
cls.getter('arb')
cls.getter('wxzx')
cls.getter('arbr')
cls.getter('wxzy')
cls.getter('arbg')
cls.getter('wxzz')
cls.getter('arbb')
cls.getter('wxzw')
cls.getter('arba')
cls.getter('wxw')
cls.getter('ara')
cls.getter('wxwx')
cls.getter('arar')
cls.getter('wxwy')
cls.getter('arag')
cls.getter('wxwz')
cls.getter('arab')
cls.getter('wxww')
cls.getter('araa')
cls.getter('wy')
cls.getter('ag')
cls.getter('wyx')
cls.getter('agr')
cls.getter('wyxx')
cls.getter('agrr')
cls.getter('wyxy')
cls.getter('agrg')
cls.getter('wyxz')
cls.getter('agrb')
cls.getter('wyxw')
cls.getter('agra')
cls.getter('wyy')
cls.getter('agg')
cls.getter('wyyx')
cls.getter('aggr')
cls.getter('wyyy')
cls.getter('aggg')
cls.getter('wyyz')
cls.getter('aggb')
cls.getter('wyyw')
cls.getter('agga')
cls.getter('wyz')
cls.getter('agb')
cls.getter('wyzx')
cls.getter('agbr')
cls.getter('wyzy')
cls.getter('agbg')
cls.getter('wyzz')
cls.getter('agbb')
cls.getter('wyzw')
cls.getter('agba')
cls.getter('wyw')
cls.getter('aga')
cls.getter('wywx')
cls.getter('agar')
cls.getter('wywy')
cls.getter('agag')
cls.getter('wywz')
cls.getter('agab')
cls.getter('wyww')
cls.getter('agaa')
cls.getter('wz')
cls.getter('ab')
cls.getter('wzx')
cls.getter('abr')
cls.getter('wzxx')
cls.getter('abrr')
cls.getter('wzxy')
cls.getter('abrg')
cls.getter('wzxz')
cls.getter('abrb')
cls.getter('wzxw')
cls.getter('abra')
cls.getter('wzy')
cls.getter('abg')
cls.getter('wzyx')
cls.getter('abgr')
cls.getter('wzyy')
cls.getter('abgg')
cls.getter('wzyz')
cls.getter('abgb')
cls.getter('wzyw')
cls.getter('abga')
cls.getter('wzz')
cls.getter('abb')
cls.getter('wzzx')
cls.getter('abbr')
cls.getter('wzzy')
cls.getter('abbg')
cls.getter('wzzz')
cls.getter('abbb')
cls.getter('wzzw')
cls.getter('abba')
cls.getter('wzw')
cls.getter('aba')
cls.getter('wzwx')
cls.getter('abar')
cls.getter('wzwy')
cls.getter('abag')
cls.getter('wzwz')
cls.getter('abab')
cls.getter('wzww')
cls.getter('abaa')
cls.getter('ww')
cls.getter('aa')
cls.getter('wwx')
cls.getter('aar')
cls.getter('wwxx')
cls.getter('aarr')
cls.getter('wwxy')
cls.getter('aarg')
cls.getter('wwxz')
cls.getter('aarb')
cls.getter('wwxw')
cls.getter('aara')
cls.getter('wwy')
cls.getter('aag')
cls.getter('wwyx')
cls.getter('aagr')
cls.getter('wwyy')
cls.getter('aagg')
cls.getter('wwyz')
cls.getter('aagb')
cls.getter('wwyw')
cls.getter('aaga')
cls.getter('wwz')
cls.getter('aab')
cls.getter('wwzx')
cls.getter('aabr')
cls.getter('wwzy')
cls.getter('aabg')
cls.getter('wwzz')
cls.getter('aabb')
cls.getter('wwzw')
cls.getter('aaba')
cls.getter('www')
cls.getter('aaa')
cls.getter('wwwx')
cls.getter('aaar')
cls.getter('wwwy')
cls.getter('aaag')
cls.getter('wwwz')
cls.getter('aaab')
cls.getter('wwww')
cls.getter('aaaa')
cls = class_(mat2)
cls.slot('m00', float)
cls.slot('m01', float)
cls.slot('m10', float)
cls.slot('m11', float)
cls.method('__init__', float, float, float, float)
cls.method('__repr__')
cls.method('__mul__', (vec2, mat2, float))
cls.method('__imul__', (vec2, float))
cls = class_(mat3)
cls.slot('m00', float)
cls.slot('m01', float)
cls.slot('m02', float)
cls.slot('m10', float)
cls.slot('m11', float)
cls.slot('m12', float)
cls.slot('m20', float)
cls.slot('m21', float)
cls.slot('m22', float)
cls.method('__init__', float, float, float, float, float, float, float, float, float)
cls.method('__repr__')
cls.method('__mul__', (vec3, mat3, float))
cls.method('__imul__', (vec3, float))
cls = class_(mat4)
cls.slot('m00', float)
cls.slot('m01', float)
cls.slot('m02', float)
cls.slot('m03', float)
cls.slot('m10', float)
cls.slot('m11', float)
cls.slot('m12', float)
cls.slot('m13', float)
cls.slot('m20', float)
cls.slot('m21', float)
cls.slot('m22', float)
cls.slot('m23', float)
cls.slot('m30', float)
cls.slot('m31', float)
cls.slot('m32', float)
cls.slot('m33', float)
cls.method('__init__', float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float)
cls.method('__repr__')
cls.method('__mul__', (vec4, mat4, float))
cls.method('__imul__', (vec4, float))
| apache-2.0 | 8,601,827,295,417,819,000 | 19.110623 | 134 | 0.671527 | false |
rohitw1991/smarttailorfrappe | frappe/core/doctype/event/test_event.py | 1 | 1825 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
"""Use blog post test to test user permissions logic"""
import frappe
import frappe.defaults
import unittest
test_records = frappe.get_test_records('Event')
class TestEvent(unittest.TestCase):
# def setUp(self):
# user = frappe.get_doc("User", "[email protected]")
# user.add_roles("Website Manager")
def tearDown(self):
frappe.set_user("Administrator")
def test_allowed_public(self):
frappe.set_user("[email protected]")
doc = frappe.get_doc("Event", frappe.db.get_value("Event", {"subject":"_Test Event 1"}))
self.assertTrue(frappe.has_permission("Event", doc=doc))
def test_not_allowed_private(self):
frappe.set_user("[email protected]")
doc = frappe.get_doc("Event", frappe.db.get_value("Event", {"subject":"_Test Event 2"}))
self.assertFalse(frappe.has_permission("Event", doc=doc))
def test_allowed_private_if_in_event_user(self):
frappe.set_user("[email protected]")
doc = frappe.get_doc("Event", frappe.db.get_value("Event", {"subject":"_Test Event 3"}))
self.assertTrue(frappe.has_permission("Event", doc=doc))
def test_event_list(self):
frappe.set_user("[email protected]")
res = frappe.get_list("Event", filters=[["Event", "subject", "like", "_Test Event%"]], fields=["name", "subject"])
self.assertEquals(len(res), 2)
subjects = [r.subject for r in res]
self.assertTrue("_Test Event 1" in subjects)
self.assertTrue("_Test Event 3" in subjects)
self.assertFalse("_Test Event 2" in subjects)
def test_revert_logic(self):
ev = frappe.get_doc(test_records[0]).insert()
name = ev.name
frappe.delete_doc("Event", ev.name)
# insert again
ev = frappe.get_doc(test_records[0]).insert()
# the name should be same!
self.assertEquals(ev.name, name)
| mit | 2,131,896,046,726,191,900 | 32.181818 | 116 | 0.697534 | false |
fingeronthebutton/RIDE | src/robotide/lib/robot/testdoc.py | 1 | 9894 | #!/usr/bin/env python
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module implementing the command line entry point for the `Testdoc` tool.
This module can be executed from the command line using the following
approaches::
python -m robot.testdoc
python path/to/robot/testdoc.py
Instead of ``python`` it is possible to use also other Python interpreters.
This module also provides :func:`testdoc` and :func:`testdoc_cli` functions
that can be used programmatically. Other code is for internal usage.
"""
USAGE = """robot.testdoc -- Robot Framework test data documentation tool
Version: <VERSION>
Usage: python -m robot.testdoc [options] data_sources output_file
Testdoc generates a high level test documentation based on Robot Framework
test data. Generated documentation includes name, documentation and other
metadata of each test suite and test case, as well as the top-level keywords
and their arguments.
Options
=======
-T --title title Set the title of the generated documentation.
Underscores in the title are converted to spaces.
The default title is the name of the top level suite.
-N --name name Override the name of the top level suite.
-D --doc document Override the documentation of the top level suite.
-M --metadata name:value * Set/override metadata of the top level suite.
-G --settag tag * Set given tag(s) to all test cases.
-t --test name * Include tests by name.
-s --suite name * Include suites by name.
-i --include tag * Include tests by tags.
-e --exclude tag * Exclude tests by tags.
-h -? --help Print this help.
All options except --title have exactly same semantics as same options have
when executing test cases.
Execution
=========
Data can be given as a single file, directory, or as multiple files and
directories. In all these cases, the last argument must be the file where
to write the output. The output is always created in HTML format.
Testdoc works with all interpreters supported by Robot Framework (Python,
Jython and IronPython). It can be executed as an installed module like
`python -m robot.testdoc` or as a script like `python path/robot/testdoc.py`.
Examples:
python -m robot.testdoc my_test.html testdoc.html
jython -m robot.testdoc -N smoke_tests -i smoke path/to/my_tests smoke.html
ipy path/to/robot/testdoc.py first_suite.txt second_suite.txt output.html
For more information about Testdoc and other built-in tools, see
http://robotframework.org/robotframework/#built-in-tools.
"""
import os.path
import sys
import time
# Allows running as a script. __name__ check needed with multiprocessing:
# http://code.google.com/p/robotframework/issues/detail?id=1137
if 'robot' not in sys.modules and __name__ == '__main__':
import pythonpathsetter
from robotide.lib.robot.conf import RobotSettings
from robotide.lib.robot.htmldata import HtmlFileWriter, ModelWriter, JsonWriter, TESTDOC
from robotide.lib.robot.parsing import disable_curdir_processing
from robotide.lib.robot.running import TestSuiteBuilder
from robotide.lib.robot.utils import (abspath, Application, format_time, get_link_path,
html_escape, html_format, is_string,
secs_to_timestr, seq2str2, timestr_to_secs, unescape)
class TestDoc(Application):
def __init__(self):
Application.__init__(self, USAGE, arg_limits=(2,))
def main(self, datasources, title=None, **options):
outfile = abspath(datasources.pop())
suite = TestSuiteFactory(datasources, **options)
self._write_test_doc(suite, outfile, title)
self.console(outfile)
def _write_test_doc(self, suite, outfile, title):
with open(outfile, 'w') as output:
model_writer = TestdocModelWriter(output, suite, title)
HtmlFileWriter(output, model_writer).write(TESTDOC)
@disable_curdir_processing
def TestSuiteFactory(datasources, **options):
settings = RobotSettings(options)
if is_string(datasources):
datasources = [datasources]
suite = TestSuiteBuilder().build(*datasources)
suite.configure(**settings.suite_config)
return suite
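# Illustrative sketch (not part of the original module): building a suite model
# programmatically with TestSuiteFactory. The data source path below is
# hypothetical; any Robot Framework data source works.
def _testsuite_factory_example():
    suite = TestSuiteFactory(['path/to/tests'])
    return suite.name, suite.test_count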
class TestdocModelWriter(ModelWriter):
def __init__(self, output, suite, title=None):
self._output = output
self._output_path = getattr(output, 'name', None)
self._suite = suite
self._title = title.replace('_', ' ') if title else suite.name
def write(self, line):
self._output.write('<script type="text/javascript">\n')
self.write_data()
self._output.write('</script>\n')
def write_data(self):
generated_time = time.localtime()
model = {
'suite': JsonConverter(self._output_path).convert(self._suite),
'title': self._title,
'generated': format_time(generated_time, gmtsep=' '),
'generatedMillis': long(time.mktime(generated_time) * 1000)
}
JsonWriter(self._output).write_json('testdoc = ', model)
class JsonConverter(object):
def __init__(self, output_path=None):
self._output_path = output_path
def convert(self, suite):
return self._convert_suite(suite)
def _convert_suite(self, suite):
return {
'source': suite.source or '',
'relativeSource': self._get_relative_source(suite.source),
'id': suite.id,
'name': self._escape(suite.name),
'fullName': self._escape(suite.longname),
'doc': self._html(suite.doc),
'metadata': [(self._escape(name), self._html(value))
for name, value in suite.metadata.items()],
'numberOfTests': suite.test_count,
'suites': self._convert_suites(suite),
'tests': self._convert_tests(suite),
'keywords': list(self._convert_keywords(suite))
}
def _get_relative_source(self, source):
if not source or not self._output_path:
return ''
return get_link_path(source, os.path.dirname(self._output_path))
def _escape(self, item):
return html_escape(item)
def _html(self, item):
return html_format(unescape(item))
def _convert_suites(self, suite):
return [self._convert_suite(s) for s in suite.suites]
def _convert_tests(self, suite):
return [self._convert_test(t) for t in suite.tests]
def _convert_test(self, test):
return {
'name': self._escape(test.name),
'fullName': self._escape(test.longname),
'id': test.id,
'doc': self._html(test.doc),
'tags': [self._escape(t) for t in test.tags],
'timeout': self._get_timeout(test.timeout),
'keywords': list(self._convert_keywords(test))
}
def _convert_keywords(self, item):
for kw in getattr(item, 'keywords', []):
if kw.type == kw.SETUP_TYPE:
yield self._convert_keyword(kw, 'SETUP')
elif kw.type == kw.TEARDOWN_TYPE:
yield self._convert_keyword(kw, 'TEARDOWN')
elif kw.type == kw.FOR_LOOP_TYPE:
yield self._convert_for_loop(kw)
else:
yield self._convert_keyword(kw, 'KEYWORD')
def _convert_for_loop(self, kw):
return {
'name': self._escape(self._get_for_loop(kw)),
'arguments': '',
'type': 'FOR'
}
def _convert_keyword(self, kw, kw_type):
return {
'name': self._escape(self._get_kw_name(kw)),
'arguments': self._escape(', '.join(kw.args)),
'type': kw_type
}
def _get_kw_name(self, kw):
if kw.assign:
return '%s = %s' % (', '.join(a.rstrip('= ') for a in kw.assign), kw.name)
return kw.name
def _get_for_loop(self, kw):
joiner = ' %s ' % kw.flavor
return ', '.join(kw.variables) + joiner + seq2str2(kw.values)
def _get_timeout(self, timeout):
if timeout is None:
return ''
try:
tout = secs_to_timestr(timestr_to_secs(timeout.value))
except ValueError:
tout = timeout.value
if timeout.message:
tout += ' :: ' + timeout.message
return tout
def testdoc_cli(arguments):
"""Executes `Testdoc` similarly as from the command line.
:param arguments: command line arguments as a list of strings.
For programmatic usage the :func:`testdoc` function is typically better. It
has a better API for that and does not call :func:`sys.exit` like
this function.
Example::
from robotide.lib.robot.testdoc import testdoc_cli
testdoc_cli(['--title', 'Test Plan', 'mytests', 'plan.html'])
"""
TestDoc().execute_cli(arguments)
def testdoc(*arguments, **options):
"""Executes `Testdoc` programmatically.
Arguments and options have same semantics, and options have same names,
as arguments and options to Testdoc.
Example::
from robotide.lib.robot.testdoc import testdoc
testdoc('mytests', 'plan.html', title='Test Plan')
"""
TestDoc().execute(*arguments, **options)
if __name__ == '__main__':
testdoc_cli(sys.argv[1:])
| apache-2.0 | 904,783,923,345,010,600 | 33.961131 | 88 | 0.63958 | false |
bcarmo-caio/jack | jack.py | 1 | 25244 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import math
import Image
import numpy
ESC = '\033'
DELTA = 60
CONFIG_DATA = []
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def disable(self):
self.HEADER = ''
self.OKBLUE = ''
self.OKGREEN = ''
self.WARNING = ''
self.FAIL = ''
self.ENDC = ''
class Vector():
def __init__(self, x0 = 0.0, x1 = 0.0, x2 = 0.0):
self.x0 = x0
self.x1 = x1
self.x2 = x2
def vectorProduct(self, v):
u = Vector()
# | i j k |
# | a b c |
# | d e f | = (bf - ce)i +
u.x0 = self.x1 * v.x2 - self.x2 * v.x1
# (cd - af)j +
u.x1 = self.x2 * v.x0 - self.x0 * v.x2
# (ae - bd)k
u.x2 = self.x0 * v.x1 - self.x1 * v.x0
return u
def vectorNormalize(self):
u = Vector()
norm = math.sqrt(self.x0*self.x0 + self.x1*self.x1 + self.x2*self.x2)
u.x0 = self.x0/norm
u.x1 = self.x1/norm
u.x2 = self.x2/norm
return u
def vectorMulScalar(self, alpha):
u = Vector()
u.x0 = alpha*self.x0
u.x1 = alpha*self.x1
u.x2 = alpha*self.x2
return u
def vectorNorm(self):
return math.sqrt(self.x0*self.x0 + self.x1*self.x1 + self.x2*self.x2)
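# Illustrative sketch (not part of the original file): how the Vector helper is
# used elsewhere in this program. The numbers are arbitrary.
def _vector_example():
    a = Vector(1., 0., 0.)
    b = Vector(0., 1., 0.)
    n = a.vectorProduct(b)     # cross product -> (0, 0, 1)
    n = n.vectorNormalize()    # already unit length here
    return n.x0, n.x1, n.x2, a.vectorNorm()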
class Texture():
def __init__(self):
self.texID = 0
def loadFromFile(self, fileName):
"""Load an image file as a 2D texture using PIL"""
try:
im = Image.open(fileName)
except IOError:
print ("Could not open %s"%fileName)
if fileName == "jack.jpg":
print (bcolors.FAIL +
"""Sorry, but Jack's been with us through the entire """
""" project... We cannot go on without him"""
+ bcolors.ENDC)
sys.exit("\"Why is the rum gone?\"")
else:
if fileName == "jack.jpg":
print (bcolors.OKGREEN + "Yo-ho-ho and a bottle of rum!" +
bcolors.ENDC)
imData = numpy.array(list(im.getdata()), numpy.int8)
texImage, w, h = imData, im.size[0], im.size[1]
self.texID = glGenTextures(1)
glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
glBindTexture(GL_TEXTURE_2D, self.texID)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, w, h, 0, GL_RGB,
GL_UNSIGNED_BYTE, texImage)
def setTexture(self):
glBindTexture(GL_TEXTURE_2D, self.texID)
def enableTextures(self):
glEnable(GL_TEXTURE_2D)
def disableTextures(self):
glDisable(GL_TEXTURE_2D)
skyTexture1 = Texture()
skyTexture2 = Texture()
skyTexture3 = Texture()
skyTexture4 = Texture()
skyTexture5 = Texture()
trackTexture = Texture()
jackTexture = Texture()
kartTexture = Texture()
class World():
def __init__(self):
self.trackPts = []
self.normalMatrix = []
self.CUBE_Xmin = 0.
self.CUBE_Xmax = 400.
self.CUBE_Ymin = 0.
self.CUBE_Ymax = 400.
self.CUBE_Zmin = 0.
self.CUBE_Zmax = 400.
self.trianglesAmountOnXAxis = 40 #
self.trianglesAmountOnZAxis = 40 #
self.sizeCubeX = self.CUBE_Xmax - self.CUBE_Xmin
self.sizeCubeZ = self.CUBE_Zmax - self.CUBE_Zmin
self.scaleZ = self.sizeCubeZ / self.trianglesAmountOnZAxis
self.scaleX = self.sizeCubeX / self.trianglesAmountOnXAxis
self.scaleY = 1.
def drawSky(self):
skyTexture1.enableTextures()
skyTexture1.setTexture()
glBegin(GL_QUADS)
#face z = CUBE_Zmax
glTexCoord2f(0.0, 0.0); glVertex3f(self.CUBE_Xmin, self.CUBE_Ymin,
self.CUBE_Zmax);
glTexCoord2f(1.0, 0.0); glVertex3f(self.CUBE_Xmax, self.CUBE_Ymin,
self.CUBE_Zmax);
glTexCoord2f(1.0, 1.0); glVertex3f(self.CUBE_Xmax, self.CUBE_Ymax,
self.CUBE_Zmax);
glTexCoord2f(0.0, 1.0); glVertex3f(self.CUBE_Xmin, self.CUBE_Ymax,
self.CUBE_Zmax);
glEnd()
skyTexture1.disableTextures()
skyTexture2.enableTextures()
skyTexture2.setTexture()
glBegin(GL_QUADS)
#face y = CUBE_Ymax
glTexCoord2f(0.0, 1.0); glVertex3f(self.CUBE_Xmin, self.CUBE_Ymax,
self.CUBE_Zmin);
glTexCoord2f(0.0, 0.0); glVertex3f(self.CUBE_Xmin, self.CUBE_Ymax,
self.CUBE_Zmax);
glTexCoord2f(1.0, 0.0); glVertex3f(self.CUBE_Xmax, self.CUBE_Ymax,
self.CUBE_Zmax);
glTexCoord2f(1.0, 1.0); glVertex3f(self.CUBE_Xmax, self.CUBE_Ymax,
self.CUBE_Zmin);
glEnd()
skyTexture2.disableTextures()
skyTexture3.enableTextures()
skyTexture3.setTexture()
glBegin(GL_QUADS)
#face x = CUBE_Xmax
glTexCoord2f(1.0, 0.0); glVertex3f(self.CUBE_Xmax, self.CUBE_Ymin,
self.CUBE_Zmin);
glTexCoord2f(1.0, 1.0); glVertex3f(self.CUBE_Xmax, self.CUBE_Ymax,
self.CUBE_Zmin);
glTexCoord2f(0.0, 1.0); glVertex3f(self.CUBE_Xmax, self.CUBE_Ymax,
self.CUBE_Zmax);
glTexCoord2f(0.0, 0.0); glVertex3f(self.CUBE_Xmax, self.CUBE_Ymin,
self.CUBE_Zmax);
glEnd()
skyTexture3.disableTextures()
skyTexture4.enableTextures()
skyTexture4.setTexture()
glBegin(GL_QUADS)
#face x = CUBE_Xmin
glTexCoord2f(0.0, 0.0); glVertex3f(self.CUBE_Xmin, self.CUBE_Ymin,
self.CUBE_Zmin);
glTexCoord2f(1.0, 0.0); glVertex3f(self.CUBE_Xmin, self.CUBE_Ymin,
self.CUBE_Zmax);
glTexCoord2f(1.0, 1.0); glVertex3f(self.CUBE_Xmin, self.CUBE_Ymax,
self.CUBE_Zmax);
glTexCoord2f(0.0, 1.0); glVertex3f(self.CUBE_Xmin, self.CUBE_Ymax,
self.CUBE_Zmin);
glEnd()
skyTexture4.disableTextures()
skyTexture5.enableTextures()
skyTexture5.setTexture()
glBegin(GL_QUADS)
#face z = CUBE_Zmin
glTexCoord2f(1.0, 0.0); glVertex3f(self.CUBE_Xmin, self.CUBE_Ymin,
self.CUBE_Zmin);
glTexCoord2f(1.0, 1.0); glVertex3f(self.CUBE_Xmin, self.CUBE_Ymax,
self.CUBE_Zmin);
glTexCoord2f(0.0, 1.0); glVertex3f(self.CUBE_Xmax, self.CUBE_Ymax,
self.CUBE_Zmin);
glTexCoord2f(0.0, 0.0); glVertex3f(self.CUBE_Xmax, self.CUBE_Ymin,
self.CUBE_Zmin);
glEnd()
skyTexture5.disableTextures()
def drawTrack(self):
#"face y = CUBE_Ymin"
trackTexture.enableTextures()
trackTexture.setTexture()
# Triangles draw order.
# 1-------3 (3->1 of next iteration)
# | /|
# | / |
# | / |
# | / |
# | / |
# | / |
# 2-------4 (4->2 of next iteration)
#
# i=0,k=0 ___________________ i=0,k=m
# | /| /| /| /| /| /|
# |/_|/_|/_|/_|/_|/_|
# | /| /| /| /| /| /|
# |/_|/_|/_|/_|/_|/_|
# | /| /| /| /| /| /|
# +Z<______|/_|/_|/_|/_|/_|/_|
# | /| /| /| /| /| /|
# |/_|/_|/_|/_|/_|/_|
# | /| /| /| /| /| /|
# |/_|/_|/_|/_|/_|/_|
# | /| /| /| /| /| /|
# i=n,k=0|/_|/_|/_|/_|/_|/_| i=n,k=n
# |
# |
# |
# \/
# +X
for k in xrange(0, self.trianglesAmountOnZAxis):
for i in xrange(0, self.trianglesAmountOnXAxis):
glBegin(GL_TRIANGLE_STRIP)
#1
glTexCoord2f(i*self.scaleX/self.sizeCubeX,
k*self.scaleZ/self.sizeCubeZ)
n = world.normalMatrix[i][k][1]
glNormal3f(n.x0, n.x1, n.x2)
glVertex3f(self.CUBE_Xmin + i*self.scaleX,
self.CUBE_Ymin + self.trackPts[i][k]/self.scaleY,
self.CUBE_Zmin + k*self.scaleZ)
#2
glTexCoord2f(i*self.scaleX/self.sizeCubeX,
(k+1)*self.scaleZ/self.sizeCubeZ)
n = world.normalMatrix[i][k][1]
glNormal3f(n.x0, n.x1, n.x2)
glVertex3f(self.CUBE_Xmin + i*self.scaleX,
self.CUBE_Ymin + self.trackPts[i][k+1]/self.scaleY,
self.CUBE_Zmin + (k+1)*self.scaleZ)
#3
glTexCoord2f((i+1)*self.scaleX/self.sizeCubeX,
k*self.scaleZ/self.sizeCubeZ)
n = world.normalMatrix[i][k][1]
glNormal3f(n.x0, n.x1, n.x2)
glVertex3f(self.CUBE_Xmin + (i+1)*self.scaleX,
self.CUBE_Ymin + self.trackPts[i+1][k]/self.scaleY,
self.CUBE_Zmin + k*self.scaleZ)
#4
glTexCoord2f((i+1)*self.scaleX/self.sizeCubeX,
(k+1)*self.scaleZ/self.sizeCubeZ)
n = world.normalMatrix[i][k][0]
glNormal3f(n.x0, n.x1, n.x2)
glVertex3f(self.CUBE_Xmin + (i+1)*self.scaleX,
self.CUBE_Ymin + self.trackPts[i+1][k+1]/self.scaleY,
self.CUBE_Zmin + (k+1)*self.scaleZ)
glEnd()
trackTexture.disableTextures()
def drawAxes(self):
glLineWidth(10.)
glColor3f(1., 0., 0.)
glBegin(GL_LINES)
glVertex3f( self.CUBE_Xmin,
(self.CUBE_Ymax - self.CUBE_Ymin)/2.,
(self.CUBE_Zmax - self.CUBE_Zmin)/2.)
glVertex3f( self.CUBE_Xmax,
(self.CUBE_Ymax - self.CUBE_Ymin)/2.,
(self.CUBE_Zmax - self.CUBE_Zmin)/2.)
glEnd()
glLineWidth(10.)
glColor3f(0., 1., 0.)
glBegin(GL_LINES)
glVertex3f( (self.CUBE_Xmax - self.CUBE_Xmin)/2.,
self.CUBE_Ymin,
(self.CUBE_Zmax - self.CUBE_Zmin)/2.)
glVertex3f( (self.CUBE_Xmax - self.CUBE_Xmin)/2.,
self.CUBE_Xmax,
(self.CUBE_Zmax - self.CUBE_Zmin)/2.)
glEnd()
glLineWidth(10.)
glColor3f(0., 0., 1.)
glBegin(GL_LINES)
glVertex3f( (self.CUBE_Xmax - self.CUBE_Xmin)/2.,
(self.CUBE_Ymax - self.CUBE_Ymin)/2.,
self.CUBE_Zmin)
glVertex3f( (self.CUBE_Xmax - self.CUBE_Xmin)/2.,
(self.CUBE_Ymax - self.CUBE_Ymin)/2.,
self.CUBE_Zmax)
glEnd()
def drawWorld(self):
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)
self.drawSky()
self.drawTrack()
self.drawAxes()
for i in xrange(0, len(trees)):
trees[i].drawTree()
kart.drawKart()
jack.drawJack()
world = World()
def getTrianglePosition(Px, Pz):
i = int(math.floor(Px/world.scaleX))
j = int(math.floor(Pz/world.scaleX))
mantissaX, integerX = math.modf(Px) # we don't use integerX
mantissaZ, integerZ = math.modf(Pz) # we don't use integerZ
#avoiding tan(pi/2), which does not exist
if mantissaZ < math.pow(10., -6): #our error limit
return i, j, 1
elif mantissaX < math.pow(10., -6):
return i, j, 0
t = math.degrees(math.atan(mantissaZ/mantissaX))
if t < 45. :
return i, j, 0
elif t < 90. :
return i, j, 1
else :
print "Error CameraTrianglePosition. Should not reach here. Aborting..."
sys.exit(-2)
def fillNormalMatrix():
global world
for k in xrange(0, world.trianglesAmountOnZAxis):
l=[]
for i in xrange(0, world.trianglesAmountOnXAxis):
#calculatin normals
# <2-1, 3-1> #these number are vertices of the those
#triangles described in drawTrack
a = Vector( 0.,
(world.trackPts[i][k+1] -
world.trackPts[i][k])/world.scaleY,
world.scaleZ)
b = Vector( world.scaleX,
(world.trackPts[i+1][k] -
world.trackPts[i][k])/world.scaleY,
0.)
u = a.vectorProduct(b)
# <4-2, 3-2>
a = Vector( world.scaleX,
(world.trackPts[i+1][k+1] -
world.trackPts[i][k+1])/world.scaleY,
0.)
b = Vector( world.scaleX,
(world.trackPts[i+1][k] -
world.trackPts[i][k+1])/world.scaleY,
world.scaleZ)
v = a.vectorProduct(b)
u = u.vectorNormalize()
v = v.vectorNormalize()
l.append((u, v))
world.normalMatrix.append(l)
class Jack():
def drawJack(self):
Ht = 55
base = 25
jackTexture.enableTextures()
jackTexture.setTexture()
glPushMatrix()
glTranslatef( (world.CUBE_Xmax - world.CUBE_Xmin)/2.,
0,
(world.CUBE_Zmax - world.CUBE_Zmin)/2.)
glBegin(GL_QUADS)
#base (y<) - not seen
#walls
#left - x<
glNormal3f(-1, 0, 0)
glTexCoord2f(0., 0.); glVertex3f(-base/2., 0, -base/2.);
glTexCoord2f(0., 1.); glVertex3f(-base/2., 0, base/2.);
glTexCoord2f(1., 1.); glVertex3f(-base/2., Ht, base/2.);
glTexCoord2f(1., 0.); glVertex3f(-base/2., Ht, -base/2.);
#right - X>
glNormal3f(1, 0, 0)
glTexCoord2f(1., 1.); glVertex3f(base/2., 0, -base/2.)
glTexCoord2f(1., 0.); glVertex3f(base/2., 0, base/2.)
glTexCoord2f(0., 0.); glVertex3f(base/2., Ht, base/2.)
glTexCoord2f(0., 1.); glVertex3f(base/2., Ht, -base/2.)
#far - z<
glNormal3f(0, 0, -1)
glTexCoord2f(0., 0.); glVertex3f(-base/2., 0, -base/2.)
glTexCoord2f(0., 1.); glVertex3f(-base/2., Ht, -base/2.)
glTexCoord2f(1., 1.); glVertex3f( base/2., Ht, -base/2.)
glTexCoord2f(1., 0.); glVertex3f( base/2., 0, -base/2.)
#near - Z>
glNormal3f(0, 0, 1)
glTexCoord2f(1., 1.); glVertex3f(-base/2., 0, base/2.)
glTexCoord2f(1., 0.); glVertex3f(-base/2., Ht, base/2.)
glTexCoord2f(0., 0.); glVertex3f( base/2., Ht, base/2.)
glTexCoord2f(0., 1.); glVertex3f( base/2., 0, base/2.)
glEnd()
glPopMatrix()
jackTexture.disableTextures()
jack = Jack()
class Tree():
def __init__(self):
self.Ht = 0.
self.Hc = 0.
self.base = 0.
self.Rt = 0.; self.Gt = 0.; self.Bt = 0.
self.Rc = 0.; self.Gc = 0.; self.Bc = 0.
self.Px = 0.; self.Py = 0.; self.Pz = 0.
def treeSetValues(self, Px, Pz, Ht, Hc, Rt, Gt, Bt, Rc, Gc, Bc):
Py = 0.
self.base = 5.
scaleT = 33.
scaleC = 15.
self.Ht = Ht * scaleT
self.Hc = Hc * scaleC
self.Rt = Rt; self.Gt = Gt; self.Bt = Bt
self.Rc = Rc; self.Gc = Gc; self.Bc = Bc
self.Px = Px; self.Py = Py; self.Pz = Pz
def drawTree(self):
#base (y<) - not seen
#trunk
colorT = [self.Rt, self.Gt, self.Bt]
glMaterialfv(GL_FRONT, GL_DIFFUSE, colorT);
glPushMatrix()
glTranslatef(self.Px, self.Py, self.Pz)
glBegin(GL_QUADS)
#left - x<
glNormal3f(-1, 0, 0)
glVertex3f(-self.base/2., 0, -self.base/2.);
glVertex3f(-self.base/2., 0, self.base/2.);
glVertex3f(-self.base/2., self.Ht, self.base/2.);
glVertex3f(-self.base/2., self.Ht, -self.base/2.);
#right - X>
glNormal3f(1, 0, 0)
glVertex3f(self.base/2., 0, -self.base/2.)
glVertex3f(self.base/2., 0, self.base/2.)
glVertex3f(self.base/2., self.Ht, self.base/2.)
glVertex3f(self.base/2., self.Ht, -self.base/2.)
#far - z<
glNormal3f(0, 0, -1)
glVertex3f(-self.base/2., 0, -self.base/2.)
glVertex3f(-self.base/2., self.Ht, -self.base/2.)
glVertex3f( self.base/2., self.Ht, -self.base/2.)
glVertex3f( self.base/2., 0, -self.base/2.)
#near - Z>
glNormal3f(0, 0, 1)
glVertex3f(-self.base/2., 0, self.base/2.)
glVertex3f(-self.base/2., self.Ht, self.base/2.)
glVertex3f( self.base/2., self.Ht, self.base/2.)
glVertex3f( self.base/2., 0, self.base/2.)
#top - Y>
glNormal3f(0, 1, 0)
glVertex3f(-self.base/2., self.Ht, self.base/2.)
glVertex3f(-self.base/2., self.Ht, -self.base/2.)
glVertex3f( self.base/2., self.Ht, -self.base/2.)
glVertex3f( self.base/2., self.Ht, self.base/2.)
glEnd()
#crown
colorC = [self.Rc, self.Gc, self.Bc]
glMaterialfv(GL_FRONT, GL_DIFFUSE, colorC);
glTranslatef(0, self.Ht, 0)
glRotatef(-90, 1, 0, 0)
glutSolidCone(self.base, self.Hc, 50, 50);
glPopMatrix()
trees = []
def loadTrees(t):
global trees
for i in xrange(0, len(t)):
trees.append(Tree())
trees[i].treeSetValues(t[i][1], t[i][2], t[i][3], t[i][4], t[i][5],
#Px Py Ht Hc Rt
t[i][6], t[i][7], t[i][8], t[i][9], t[i][10])
#Gt Bt Rc Gc Bc
def animacao( id ):
glutPostRedisplay()
glutTimerFunc(DELTA, animacao, 0)
kart.kartMove()
def loadTextures():
skyTexture1.loadFromFile(CONFIG_DATA[0])
skyTexture2.loadFromFile(CONFIG_DATA[1])
skyTexture3.loadFromFile(CONFIG_DATA[2])
skyTexture4.loadFromFile(CONFIG_DATA[3])
skyTexture5.loadFromFile(CONFIG_DATA[4])
trackTexture.loadFromFile(CONFIG_DATA[7])
jackTexture.loadFromFile('jack.jpg')
kartTexture.loadFromFile('bola.jpg')
def init(configFile = 'config.txt'):
global world
global kart
global CONFIG_DATA
glutInit(sys.argv)
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH)
glutInitWindowSize(500, 500)
glutInitWindowPosition(600, 200)
glutCreateWindow(sys.argv[0])
glClearColor(0.5, 0.5, 0.5, 0.0)
#glShadeModel(GL_FLAT)
glEnable(GL_DEPTH_TEST) #this IS important
glutDisplayFunc(display)
glutReshapeFunc(reshape)
glutKeyboardFunc(keyboard)
glutTimerFunc(DELTA, animacao, 0)
CONFIG_DATA = carregueConfig(configFile)
trackTexName, texData, w, h, world.trackPts = carreguePista(CONFIG_DATA[5],
False)
k, trees = carregueDetalhes(CONFIG_DATA[6], False)
loadTrees(trees)
fillNormalMatrix()
k = k[0]
kart = Kart(k[1], k[2], k[3], k[4], k[5], k[6], k[7])
kart.resetKart()
CONFIG_DATA.append(trackTexName)
loadTextures()
def display():
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
LIGHT0_POSITION = [0.0, 0.0, 9999.0, 0.0]
glLightfv(GL_LIGHT0, GL_POSITION, LIGHT0_POSITION);
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
gluLookAt(camera.Ex, camera.Ey, camera.Ez,
camera.Px, camera.Py, camera.Pz,
camera.Ux, camera.Uy, camera.Uz)
world.drawWorld()
glutSwapBuffers()
def reshape(w, h):
glViewport(0, 0, w, h)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(45, 1.0*w / h, 0.05, 700)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
gluLookAt(camera.Ex, camera.Ey, camera.Ez,
camera.Px, camera.Py, camera.Pz,
camera.Ux, camera.Uy, camera.Uz)
class Camera():
def __init__(self):
self.RO = 80.
self.yDist = 35.
self.Ex = 0.
self.Ey = 0.
self.Ez = 0.
self.Px = 0.
self.Py = 0.
self.Pz = 0.
self.Ux = 0.
self.Uy = 0.
self.Uz = 0.
def defineCamera(self,
eyeX, eyeY, eyeZ,
centerX, centerY,
centerZ, upX, upY, upZ):
correctionXmin = 3.
correctionXmax = -3.
correctionYmin = 3.
correctionYmax = -3.
correctionZmin = 3.
correctionZmax = -3.
#camera cannot get out from world. God of War effect. Hell yeah! ì.í
if eyeX < world.CUBE_Xmin:
print "avoiding camera going out on X<"
eyeX = world.CUBE_Xmin + correctionXmin
elif eyeX > world.CUBE_Xmax:
print "avoiding camera going out on X>"
eyeX = world.CUBE_Xmax + correctionXmax
if eyeY < world.CUBE_Ymin:
print "Avoiding camera going out on Y<"
eyeY = world.CUBE_Ymin + correctionYmin
elif eyeY > world.CUBE_Ymax:
print "Avoiding camera going out on Y>"
eyeY = world.CUBE_Ymax + correctionYmax
if eyeZ < world.CUBE_Zmin:
print "Avoiding camera going out on Z<"
eyeZ = world.CUBE_Zmin + correctionZmin
elif eyeZ > world.CUBE_Zmax:
print "Avoiding camera going out on Z>"
eyeZ = world.CUBE_Zmax + correctionZmax
self.Ex = eyeX
self.Ey = eyeY
self.Ez = eyeZ
self.Px = centerX
self.Py = centerY
self.Pz = centerZ
self.Ux = upX
self.Uy = upY
self.Uz = upZ
def setCameraHeight(self, h):
self.yDist = h
def resetCamera(self):
u = Vector(kart.Vd.x0, 0., kart.Vd.x2)
u = u.vectorNormalize()
u.x0 = kart.Px - self.RO*u.x0
u.x2 = kart.Pz - self.RO*u.x2
Ex, Ey, Ez = u.x0, kart.Py + self.yDist, u.x2
Px, Py, Pz = kart.Px, kart.Py, kart.Pz
Ux, Uy, Uz = .0, 1., .0
self.defineCamera(Ex, Ey, Ez,
Px, Py, Pz,
Ux, Uy, Uz)
camera = Camera()
class Kart():
def __init__(self, Px = 0., Pz = 0., R = 3.,
Rk = 0., Gk = 0., Bk = 0., THETA = 0.):
self.ROTHETA = math.radians(THETA) # read only
self.THETA = math.radians(THETA)
self.ROPx = Px # read only
self.Px = Px
self.Py = 0.
self.ROPz = Pz # read only
self.Pz = Pz
self.Rk = Rk
self.Gk = Gk
self.Bk = Bk
self.Vd = Vector()
self.V = self.Vd.vectorNorm()
self.R = R
self.distUpFromTrack = self.R
if self.V > 0.0:
self.resetKart()
def drawKart(self):
qObj = gluNewQuadric();
gluQuadricNormals(qObj, GLU_SMOOTH);
gluQuadricTexture(qObj, GL_TRUE);
#kartTexture.enableTextures()
#kartTexture.setTexture()
colorT = [self.Rk, self.Gk, self.Bk]
glMaterialfv(GL_FRONT, GL_DIFFUSE, colorT);
glPushMatrix()
glTranslatef(self.Px, self.Py, self.Pz)
gluSphere(qObj, self.R, 100, 100);
glPopMatrix()
#kartTexture.disableTextures()
glLineWidth(10.)
glColor3f(0., 1., 0.)
glBegin(GL_LINES)
glVertex3f(self.Px, self.Py, self.Pz)
glVertex3f(self.Px + self.V * self.Vd.x0,
self.Py + self.V * self.Vd.x1,
self.Pz + self.V * self.Vd.x2)
glEnd()
def resetKart(self):
self.THETA = self.ROTHETA
i, k, c = getTrianglePosition(self.ROPx, self.ROPz) #??
hv1 = world.trackPts[i][k]
hv2 = world.trackPts[i][k+1]
hv3 = world.trackPts[i+1][k]
self.Px = self.ROPx
self.Py = (hv1 + hv2 + hv3)/3. + self.R
self.Pz = self.ROPz
self.V = 0.
self.Vd.x0 = math.cos(self.THETA)
self.Vd.x2 = math.sin(self.THETA)
camera.resetCamera()
def accel(self, a, b):
self.V = self.V * a + b
def kartMove(self):
if self.V < 0.0001 and self.V > -0.0001: # speed is (almost) zero: nothing to move, avoid division by 0
return
Kx = self.Px + self.Vd.x0 * self.V
Kz = self.Pz + self.Vd.x2 * self.V
i, k, j = getTrianglePosition(Kx, Kz)
j = (world.trackPts[i][k] +
world.trackPts[i][k+1] +
world.trackPts[i+1][k])/3.
Ky = self.distUpFromTrack + j
self.Px, self.Py, self.Pz = Kx, Ky, Kz
camera.resetCamera()
def kartTurnRight(self):
self.THETA = (self.THETA + 0.1) % (2*math.pi)
u = Vector()
u.x0 = self.Px*math.cos(self.THETA) - self.Pz*math.sin(self.THETA)
u.x2 = self.Px*math.sin(self.THETA) + self.Pz*math.cos(self.THETA)
u.x1 = 0.
u = u.vectorNormalize()
self.Vd.x0 = u.x0
self.Vd.x2 = u.x2
camera.resetCamera()
def kartTurnLeft(self):
self.THETA = (self.THETA - 0.1) % (2*math.pi)
u = Vector()
u.x0 = self.Px*math.cos(self.THETA) - self.Pz*math.sin(self.THETA)
u.x2 = self.Px*math.sin(self.THETA) + self.Pz*math.cos(self.THETA)
u.x1 = 0.
u = u.vectorNormalize()
self.Vd.x0 = u.x0
self.Vd.x2 = u.x2
camera.resetCamera()
kart = Kart()
def keyboard(key, x, y):
if (key == ESC) or (key == 'q'):
sys.exit(0)
elif key == 'j':
kart.kartTurnLeft()
elif key == 'l':
kart.kartTurnRight()
elif key == 'i':
camera.yDist += 1.
elif key == 'k':
camera.yDist -= 1.
elif key == 'a':
kart.accel(1., 0.04)
elif key == 'z':
kart.accel(1., -0.04)
elif key == 'v':
camera.resetCamera()
elif key == 'b':
kart.resetKart()
#####################################################
#####################################################
T = 0 # top
N = 1 # north
E = 2 # east
S = 3 # south
W = 4 # west
P = 5 # course
D = 6 # detail
# ----------------------------------------------------------------------
def carregueConfig(fileName):
""" (file) -> list of file names
returns a list with the names of the files contained in
the configuration file.
"""
try:
fp = open(fileName)
except IOError:
print ("Não consegui abrir o arquivo %s"%(fileName))
sys.exit(-1)
name = []
for line in fp:
name.append(line.strip())
fp.close()
return name
# ----------------------------------------------------------------------
def carregueDetalhes( fileName, verbose = False ):
""" (file) -> names
returns one list per object type, each containing the objects
of that type found in the details file.
"""
try:
fp = open(fileName)
except IOError:
print ("Não consegui abrir o arquivo %s"%(fileName))
sys.exit(-1)
kart = []
tree = []
for line in fp:
l = line.strip().split()
for i in range(1, len(l)):
l[i] = float(l[i])
if l[0] == 'K':
if verbose:
print ("Achei um kart", l)
kart.append(l)
elif l[0] == 'A':
if verbose:
print ("Achei uma arvore", l)
tree.append(l)
else:
if verbose:
print("Nao entendi a linha", l)
fp.close()
return kart, tree
# ----------------------------------------------------------------------
def carreguePista( fileName, verbose = False ):
""" (file) -> texData, texWidth, texHeight, trackPts
returns the texture (data, width and height)
and a matrix with the track's elevation map.
"""
try:
fp = open(fileName)
except IOError:
print ("Não consegui abrir o arquivo %s"%(fileName))
sys.exit(-1)
nameTextureTrack = fp.readline().strip()
img = Image.open(nameTextureTrack)
texData = numpy.array(list(img.getdata()), numpy.int8)
w, h = img.size[0], img.size[1]
xmin, ymin = fp.readline().strip().split()
xmax, ymax = fp.readline().strip().split()
xmin = int(xmin)
xmax = int(xmax)
ymin = int(ymin)
ymax = int(ymax)
trackPts = []
for line in fp:
l = [float(x) for x in line.strip().split()]
trackPts.append(l)
fp.close()
if verbose:
print("Carreguei a textura de %s"%(fileName))
print("Retangulo: inf esq (%d x %d) x sup dir (%d x %d)"
%(xmin, ymin, xmax, ymax))
print("Texture dimension: %d x %d"%(w, h))
print("Track elevation dim: %d x %d"%(len(trackPts), len(trackPts[0])))
return nameTextureTrack, texData, w, h, trackPts
#####################################################
#####################################################
if __name__ == "__main__":
if len(sys.argv) > 1:
init(sys.argv[1])
else:
init('config.txt')
print "ESC or 'q' to quit this master game"
glutMainLoop()
| gpl-3.0 | -5,939,310,564,344,373,000 | 26.822492 | 79 | 0.592471 | false |
p-l-/miasm | miasm2/jitter/jitload.py | 1 | 13521 | #!/usr/bin/env python
import logging
from functools import wraps
from collections import Sequence, namedtuple
from miasm2.jitter.csts import *
from miasm2.core.utils import *
from miasm2.core.bin_stream import bin_stream_vm
from miasm2.ir.ir2C import init_arch_C
hnd = logging.StreamHandler()
hnd.setFormatter(logging.Formatter("[%(levelname)s]: %(message)s"))
log = logging.getLogger('jitload.py')
log.addHandler(hnd)
log.setLevel(logging.CRITICAL)
log_func = logging.getLogger('jit function call')
log_func.addHandler(hnd)
log_func.setLevel(logging.CRITICAL)
try:
from miasm2.jitter.jitcore_tcc import JitCore_Tcc
except ImportError:
log.error('cannot import jit tcc')
try:
from miasm2.jitter.jitcore_llvm import JitCore_LLVM
except ImportError:
log.error('cannot import jit llvm')
try:
from miasm2.jitter.jitcore_python import JitCore_Python
except ImportError:
log.error('cannot import jit python')
try:
from miasm2.jitter import VmMngr
except ImportError:
log.error('cannot import VmMngr')
def named_arguments(func):
"""Function decorator to allow the use of .func_args_*() methods
with either the number of arguments or the list of the argument
names.
The wrapper is also used to log the argument values.
@func: function
"""
@wraps(func)
def newfunc(self, args):
if isinstance(args, Sequence):
ret_ad, arg_vals = func(self, len(args))
arg_vals = namedtuple("args", args)(*arg_vals)
# func_name(arguments) return address
log_func.info('%s(%s) ret addr: %s',
whoami(),
', '.join("%s=0x%x" % (field, value)
for field, value in arg_vals._asdict().iteritems()),
hex(ret_ad))
return ret_ad, namedtuple("args", args)(*arg_vals)
else:
ret_ad, arg_vals = func(self, args)
# func_name(arguments) return address
log_func.info('%s(%s) ret addr: %s',
whoami(),
', '.join(hex(arg) for arg in arg_vals),
hex(ret_ad))
return ret_ad, arg_vals
return newfunc
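# Illustrative sketch (not part of the original module): how @named_arguments is
# meant to wrap a func_args_*() helper. The class, return address and argument
# names below are hypothetical.
def _named_arguments_example():
    class _DemoABI(object):
        @named_arguments
        def func_args_demo(self, n_args):
            # pretend the return address is 0x1337 and the arguments are 1, 2, ...
            return 0x1337, [i + 1 for i in xrange(n_args)]

    demo = _DemoABI()
    ret_ad, args = demo.func_args_demo(2)               # numeric form
    ret_ad, args = demo.func_args_demo(["fd", "buf"])   # named form
    return ret_ad, args.fd, args.buf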
class CallbackHandler(object):
"Handle a list of callback"
def __init__(self):
self.callbacks = {} # Key -> [callback list]
def add_callback(self, name, callback):
"""Add a callback to the key @name, iff the @callback isn't already
assigned to it"""
if callback not in self.callbacks.get(name, []):
self.callbacks[name] = self.callbacks.get(name, []) + [callback]
def set_callback(self, name, *args):
"Set the list of callback for key 'name'"
self.callbacks[name] = list(args)
def get_callbacks(self, name):
"Return the list of callbacks associated to key 'name'"
return self.callbacks.get(name, [])
def remove_callback(self, callback):
"""Remove the callback from the list.
Return the list of empty keys (removed)"""
to_check = set()
for key, cb_list in self.callbacks.items():
try:
cb_list.remove(callback)
to_check.add(key)
except ValueError:
pass
empty_keys = []
for key in to_check:
if len(self.callbacks[key]) == 0:
empty_keys.append(key)
del(self.callbacks[key])
return empty_keys
def call_callbacks(self, name, *args):
"""Call callbacks associated to key 'name' with arguments args. While
callbacks return True, continue with next callback.
Iterator on other results."""
res = True
for c in self.get_callbacks(name):
res = c(*args)
if res is not True:
yield res
def __call__(self, name, *args):
"Wrapper for call_callbacks"
return self.call_callbacks(name, *args)
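# Illustrative sketch (not part of the original module): registering and firing
# callbacks with CallbackHandler. The key and the callback are hypothetical.
def _callback_handler_example():
    handler = CallbackHandler()
    def _on_hit(data):
        # returning True means "continue with the next callback"
        return True
    handler.add_callback("my_key", _on_hit)
    # __call__ wraps call_callbacks and only yields results different from True
    return list(handler("my_key", "some data"))  # -> []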
class CallbackHandlerBitflag(CallbackHandler):
"Handle a list of callback with conditions on bitflag"
def __call__(self, bitflag, *args):
"""Call each callbacks associated with bit set in bitflag. While
callbacks return True, continue with next callback.
Iterator on other results"""
res = True
for b in self.callbacks.keys():
if b & bitflag != 0:
# If the flag matched
for res in self.call_callbacks(b, *args):
if res is not True:
yield res
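# Illustrative sketch (not part of the original module): bit-masked dispatch with
# CallbackHandlerBitflag, reusing an exception constant imported from csts.
def _bitflag_handler_example():
    handler = CallbackHandlerBitflag()
    def _on_automod(jitter_instance):
        return True
    handler.add_callback(EXCEPT_CODE_AUTOMOD, _on_automod)
    # every callback whose key shares a bit with the flag argument is invoked
    return list(handler(EXCEPT_CODE_AUTOMOD, None))  # -> []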
class ExceptionHandle():
"Return type for exception handler"
def __init__(self, except_flag):
self.except_flag = except_flag
@classmethod
def memoryBreakpoint(cls):
return cls(EXCEPT_BREAKPOINT_INTERN)
def __eq__(self, to_cmp):
if not isinstance(to_cmp, ExceptionHandle):
return False
return (self.except_flag == to_cmp.except_flag)
class jitter:
"Main class for JIT handling"
def __init__(self, ir_arch, jit_type="tcc"):
"""Init an instance of jitter.
@ir_arch: ir instance for this architecture
@jit_type: JiT backend to use. Available options are:
- "tcc"
- "llvm"
- "python"
"""
self.arch = ir_arch.arch
self.attrib = ir_arch.attrib
arch_name = ir_arch.arch.name # (ir_arch.arch.name, ir_arch.attrib)
if arch_name == "x86":
from miasm2.jitter.arch import JitCore_x86 as jcore
elif arch_name == "arm":
from miasm2.jitter.arch import JitCore_arm as jcore
elif arch_name == "msp430":
from miasm2.jitter.arch import JitCore_msp430 as jcore
elif arch_name == "mips32":
from miasm2.jitter.arch import JitCore_mips32 as jcore
else:
raise ValueError("unsupported jit arch!")
self.vm = VmMngr.Vm()
self.cpu = jcore.JitCpu()
self.bs = bin_stream_vm(self.vm)
self.ir_arch = ir_arch
init_arch_C(self.arch)
if jit_type == "tcc":
self.jit = JitCore_Tcc(self.ir_arch, self.bs)
elif jit_type == "llvm":
self.jit = JitCore_LLVM(self.ir_arch, self.bs)
elif jit_type == "python":
self.jit = JitCore_Python(self.ir_arch, self.bs)
else:
raise Exception("Unkown JiT Backend")
self.cpu.init_regs()
self.vm.init_memory_page_pool()
self.vm.init_code_bloc_pool()
self.vm.init_memory_breakpoint()
self.vm.set_addr2obj(self.jit.addr2obj)
self.jit.load()
self.cpu.vmmngr = self.vm
self.cpu.jitter = self.jit
self.stack_size = 0x10000
self.stack_base = 0x1230000
# Init callback handler
self.breakpoints_handler = CallbackHandler()
self.exceptions_handler = CallbackHandlerBitflag()
self.init_exceptions_handler()
self.exec_cb = None
def init_exceptions_handler(self):
"Add common exceptions handlers"
def exception_automod(jitter):
"Tell the JiT backend to update blocs modified"
self.jit.updt_automod_code(jitter.vm)
self.vm.set_exception(0)
return True
def exception_memory_breakpoint(jitter):
"Stop the execution and return an identifier"
return ExceptionHandle.memoryBreakpoint()
self.add_exception_handler(EXCEPT_CODE_AUTOMOD, exception_automod)
self.add_exception_handler(EXCEPT_BREAKPOINT_INTERN,
exception_memory_breakpoint)
def add_breakpoint(self, addr, callback):
"""Add a callback associated with addr.
@addr: breakpoint address
@callback: function with definition (jitter instance)
"""
self.breakpoints_handler.add_callback(addr, callback)
self.jit.add_disassembly_splits(addr)
def set_breakpoint(self, addr, *args):
"""Set callbacks associated with addr.
@addr: breakpoint address
@args: functions with definition (jitter instance)
"""
self.breakpoints_handler.set_callback(addr, *args)
self.jit.add_disassembly_splits(addr)
def remove_breakpoints_by_callback(self, callback):
"""Remove callbacks associated with breakpoint.
@callback: callback to remove
"""
empty_keys = self.breakpoints_handler.remove_callback(callback)
for key in empty_keys:
self.jit.remove_disassembly_splits(key)
def add_exception_handler(self, flag, callback):
"""Add a callback associated with an exception flag.
@flag: bitflag
@callback: function with definition (jitter instance)
"""
self.exceptions_handler.add_callback(flag, callback)
def runbloc(self, pc):
"""Wrapper on JiT backend. Run the code at PC and return the next PC.
@pc: address of code to run"""
return self.jit.runbloc(self.cpu, self.vm, pc)
def runiter_once(self, pc):
"""Iterator on callbacks results on code running from PC.
Check exceptions before breakpoints."""
self.pc = pc
# Callback called before exec
if self.exec_cb is not None:
res = self.exec_cb(self)
if res is not True:
yield res
# Check breakpoints
old_pc = self.pc
for res in self.breakpoints_handler(self.pc, self):
if res is not True:
yield res
# If a callback changed pc, re call every callback
if old_pc != self.pc:
return
# Exceptions should never be activated before run
assert(self.get_exception() == 0)
# Run the bloc at PC
self.pc = self.runbloc(self.pc)
# Check exceptions
exception_flag = self.get_exception()
for res in self.exceptions_handler(exception_flag, self):
if res is not True:
yield res
def init_run(self, pc):
"""Create an iterator on pc with runiter.
@pc: address of code to run
"""
self.run_iterator = self.runiter_once(pc)
self.pc = pc
self.run = True
def continue_run(self, step=False):
"""PRE: init_run.
Continue the run of the current session until iterator returns or run is
set to False.
If step is True, run only one time.
Return the iterator value"""
while self.run:
try:
return self.run_iterator.next()
except StopIteration:
pass
self.run_iterator = self.runiter_once(self.pc)
if step is True:
return None
return None
def init_stack(self):
self.vm.add_memory_page(
self.stack_base, PAGE_READ | PAGE_WRITE, "\x00" * self.stack_size)
sp = self.arch.getsp(self.attrib)
setattr(self.cpu, sp.name, self.stack_base + self.stack_size)
# regs = self.cpu.get_gpreg()
# regs[sp.name] = self.stack_base+self.stack_size
# self.cpu.set_gpreg(regs)
def get_exception(self):
return self.cpu.get_exception() | self.vm.get_exception()
    # common functions
def get_str_ansi(self, addr, max_char=None):
"""Get ansi str from vm.
@addr: address in memory
@max_char: maximum len"""
l = 0
tmp = addr
while ((max_char is None or l < max_char) and
self.vm.get_mem(tmp, 1) != "\x00"):
tmp += 1
l += 1
return self.vm.get_mem(addr, l)
def get_str_unic(self, addr, max_char=None):
"""Get unicode str from vm.
@addr: address in memory
@max_char: maximum len"""
l = 0
tmp = addr
while ((max_char is None or l < max_char) and
self.vm.get_mem(tmp, 2) != "\x00\x00"):
tmp += 2
l += 2
s = self.vm.get_mem(addr, l)
s = s[::2] # TODO: real unicode decoding
return s
def set_str_ansi(self, addr, s):
"""Set an ansi string in memory"""
s = s + "\x00"
self.vm.set_mem(addr, s)
def set_str_unic(self, addr, s):
"""Set an unicode string in memory"""
s = "\x00".join(list(s)) + '\x00' * 3
self.vm.set_mem(addr, s)
@staticmethod
def handle_lib(jitter):
"""Resolve the name of the function which cause the handler call. Then
call the corresponding handler from users callback.
"""
fname = jitter.libs.fad2cname[jitter.pc]
if fname in jitter.user_globals:
func = jitter.user_globals[fname]
else:
log.debug('%r', fname)
raise ValueError('unknown api', hex(jitter.pc), repr(fname))
func(jitter)
jitter.pc = getattr(jitter.cpu, jitter.ir_arch.pc.name)
return True
def handle_function(self, f_addr):
"""Add a brakpoint which will trigger the function handler"""
self.add_breakpoint(f_addr, self.handle_lib)
def add_lib_handler(self, libs, user_globals=None):
"""Add a function to handle libs call with breakpoints
@libs: libimp instance
        @user_globals: dictionary of user defined functions
"""
if user_globals is None:
user_globals = {}
self.libs = libs
self.user_globals = user_globals
for f_addr in libs.fad2cname:
self.handle_function(f_addr)
| gpl-2.0 | 6,448,087,656,768,726,000 | 30.444186 | 80 | 0.586347 | false |
freakboy3742/django | django/core/mail/message.py | 10 | 17291 | import mimetypes
from email import (
charset as Charset, encoders as Encoders, generator, message_from_string,
)
from email.errors import HeaderParseError
from email.header import Header
from email.headerregistry import Address, parser
from email.message import Message
from email.mime.base import MIMEBase
from email.mime.message import MIMEMessage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formataddr, formatdate, getaddresses, make_msgid
from io import BytesIO, StringIO
from pathlib import Path
from django.conf import settings
from django.core.mail.utils import DNS_NAME
from django.utils.encoding import force_str, punycode
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
utf8_charset = Charset.Charset('utf-8')
utf8_charset.body_encoding = None # Python defaults to BASE64
utf8_charset_qp = Charset.Charset('utf-8')
utf8_charset_qp.body_encoding = Charset.QP
# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed).
DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream'
RFC5322_EMAIL_LINE_LENGTH_LIMIT = 998
class BadHeaderError(ValueError):
pass
# Header names that contain structured address data (RFC #5322)
ADDRESS_HEADERS = {
'from',
'sender',
'reply-to',
'to',
'cc',
'bcc',
'resent-from',
'resent-sender',
'resent-to',
'resent-cc',
'resent-bcc',
}
def forbid_multi_line_headers(name, val, encoding):
"""Forbid multi-line headers to prevent header injection."""
encoding = encoding or settings.DEFAULT_CHARSET
val = str(val) # val may be lazy
if '\n' in val or '\r' in val:
raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name))
try:
val.encode('ascii')
except UnicodeEncodeError:
if name.lower() in ADDRESS_HEADERS:
val = ', '.join(sanitize_address(addr, encoding) for addr in getaddresses((val,)))
else:
val = Header(val, encoding).encode()
else:
if name.lower() == 'subject':
val = Header(val).encode()
return name, val
def sanitize_address(addr, encoding):
"""
Format a pair of (name, address) or an email address string.
"""
address = None
if not isinstance(addr, tuple):
addr = force_str(addr)
try:
token, rest = parser.get_mailbox(addr)
except (HeaderParseError, ValueError, IndexError):
raise ValueError('Invalid address "%s"' % addr)
else:
if rest:
# The entire email address must be parsed.
raise ValueError(
'Invalid address; only %s could be parsed from "%s"'
% (token, addr)
)
nm = token.display_name or ''
localpart = token.local_part
domain = token.domain or ''
else:
nm, address = addr
localpart, domain = address.rsplit('@', 1)
address_parts = nm + localpart + domain
if '\n' in address_parts or '\r' in address_parts:
raise ValueError('Invalid address; address parts cannot contain newlines.')
# Avoid UTF-8 encode, if it's possible.
try:
nm.encode('ascii')
nm = Header(nm).encode()
except UnicodeEncodeError:
nm = Header(nm, encoding).encode()
try:
localpart.encode('ascii')
except UnicodeEncodeError:
localpart = Header(localpart, encoding).encode()
domain = punycode(domain)
parsed_address = Address(username=localpart, domain=domain)
return formataddr((nm, parsed_address.addr_spec))
class MIMEMixin:
def as_string(self, unixfrom=False, linesep='\n'):
"""Return the entire formatted message as a string.
Optional `unixfrom' when True, means include the Unix From_ envelope
header.
This overrides the default as_string() implementation to not mangle
lines that begin with 'From '. See bug #13433 for details.
"""
fp = StringIO()
g = generator.Generator(fp, mangle_from_=False)
g.flatten(self, unixfrom=unixfrom, linesep=linesep)
return fp.getvalue()
def as_bytes(self, unixfrom=False, linesep='\n'):
"""Return the entire formatted message as bytes.
Optional `unixfrom' when True, means include the Unix From_ envelope
header.
This overrides the default as_bytes() implementation to not mangle
lines that begin with 'From '. See bug #13433 for details.
"""
fp = BytesIO()
g = generator.BytesGenerator(fp, mangle_from_=False)
g.flatten(self, unixfrom=unixfrom, linesep=linesep)
return fp.getvalue()
class SafeMIMEMessage(MIMEMixin, MIMEMessage):
def __setitem__(self, name, val):
# message/rfc822 attachments must be ASCII
name, val = forbid_multi_line_headers(name, val, 'ascii')
MIMEMessage.__setitem__(self, name, val)
class SafeMIMEText(MIMEMixin, MIMEText):
def __init__(self, _text, _subtype='plain', _charset=None):
self.encoding = _charset
MIMEText.__init__(self, _text, _subtype=_subtype, _charset=_charset)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEText.__setitem__(self, name, val)
def set_payload(self, payload, charset=None):
if charset == 'utf-8' and not isinstance(charset, Charset.Charset):
has_long_lines = any(
len(line.encode()) > RFC5322_EMAIL_LINE_LENGTH_LIMIT
for line in payload.splitlines()
)
# Quoted-Printable encoding has the side effect of shortening long
# lines, if any (#22561).
charset = utf8_charset_qp if has_long_lines else utf8_charset
MIMEText.set_payload(self, payload, charset=charset)
class SafeMIMEMultipart(MIMEMixin, MIMEMultipart):
def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params):
self.encoding = encoding
MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEMultipart.__setitem__(self, name, val)
class EmailMessage:
"""A container for email information."""
content_subtype = 'plain'
mixed_subtype = 'mixed'
encoding = None # None => use settings default
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, cc=None,
reply_to=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
"""
if to:
if isinstance(to, str):
raise TypeError('"to" argument must be a list or tuple')
self.to = list(to)
else:
self.to = []
if cc:
if isinstance(cc, str):
raise TypeError('"cc" argument must be a list or tuple')
self.cc = list(cc)
else:
self.cc = []
if bcc:
if isinstance(bcc, str):
raise TypeError('"bcc" argument must be a list or tuple')
self.bcc = list(bcc)
else:
self.bcc = []
if reply_to:
if isinstance(reply_to, str):
raise TypeError('"reply_to" argument must be a list or tuple')
self.reply_to = list(reply_to)
else:
self.reply_to = []
self.from_email = from_email or settings.DEFAULT_FROM_EMAIL
self.subject = subject
self.body = body or ''
self.attachments = []
if attachments:
for attachment in attachments:
if isinstance(attachment, MIMEBase):
self.attach(attachment)
else:
self.attach(*attachment)
self.extra_headers = headers or {}
self.connection = connection
def get_connection(self, fail_silently=False):
from django.core.mail import get_connection
if not self.connection:
self.connection = get_connection(fail_silently=fail_silently)
return self.connection
def message(self):
encoding = self.encoding or settings.DEFAULT_CHARSET
msg = SafeMIMEText(self.body, self.content_subtype, encoding)
msg = self._create_message(msg)
msg['Subject'] = self.subject
msg['From'] = self.extra_headers.get('From', self.from_email)
self._set_list_header_if_not_empty(msg, 'To', self.to)
self._set_list_header_if_not_empty(msg, 'Cc', self.cc)
self._set_list_header_if_not_empty(msg, 'Reply-To', self.reply_to)
# Email header names are case-insensitive (RFC 2045), so we have to
# accommodate that when doing comparisons.
header_names = [key.lower() for key in self.extra_headers]
if 'date' not in header_names:
# formatdate() uses stdlib methods to format the date, which use
# the stdlib/OS concept of a timezone, however, Django sets the
# TZ environment variable based on the TIME_ZONE setting which
# will get picked up by formatdate().
msg['Date'] = formatdate(localtime=settings.EMAIL_USE_LOCALTIME)
if 'message-id' not in header_names:
# Use cached DNS_NAME for performance
msg['Message-ID'] = make_msgid(domain=DNS_NAME)
for name, value in self.extra_headers.items():
if name.lower() != 'from': # From is already handled
msg[name] = value
return msg
def recipients(self):
"""
Return a list of all recipients of the email (includes direct
addressees as well as Cc and Bcc entries).
"""
return [email for email in (self.to + self.cc + self.bcc) if email]
def send(self, fail_silently=False):
"""Send the email message."""
if not self.recipients():
# Don't bother creating the network connection if there's nobody to
# send to.
return 0
return self.get_connection(fail_silently).send_messages([self])
def attach(self, filename=None, content=None, mimetype=None):
"""
Attach a file with the given filename and content. The filename can
be omitted and the mimetype is guessed, if not provided.
If the first parameter is a MIMEBase subclass, insert it directly
into the resulting message attachments.
For a text/* mimetype (guessed or specified), when a bytes object is
specified as content, decode it as UTF-8. If that fails, set the
mimetype to DEFAULT_ATTACHMENT_MIME_TYPE and don't decode the content.
"""
if isinstance(filename, MIMEBase):
if content is not None or mimetype is not None:
raise ValueError(
'content and mimetype must not be given when a MIMEBase '
'instance is provided.'
)
self.attachments.append(filename)
elif content is None:
raise ValueError('content must be provided.')
else:
mimetype = mimetype or mimetypes.guess_type(filename)[0] or DEFAULT_ATTACHMENT_MIME_TYPE
basetype, subtype = mimetype.split('/', 1)
if basetype == 'text':
if isinstance(content, bytes):
try:
content = content.decode()
except UnicodeDecodeError:
# If mimetype suggests the file is text but it's
# actually binary, read() raises a UnicodeDecodeError.
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
self.attachments.append((filename, content, mimetype))
def attach_file(self, path, mimetype=None):
"""
Attach a file from the filesystem.
Set the mimetype to DEFAULT_ATTACHMENT_MIME_TYPE if it isn't specified
and cannot be guessed.
For a text/* mimetype (guessed or specified), decode the file's content
as UTF-8. If that fails, set the mimetype to
DEFAULT_ATTACHMENT_MIME_TYPE and don't decode the content.
"""
path = Path(path)
with path.open('rb') as file:
content = file.read()
self.attach(path.name, content, mimetype)
def _create_message(self, msg):
return self._create_attachments(msg)
def _create_attachments(self, msg):
if self.attachments:
encoding = self.encoding or settings.DEFAULT_CHARSET
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding)
if self.body or body_msg.is_multipart():
msg.attach(body_msg)
for attachment in self.attachments:
if isinstance(attachment, MIMEBase):
msg.attach(attachment)
else:
msg.attach(self._create_attachment(*attachment))
return msg
def _create_mime_attachment(self, content, mimetype):
"""
Convert the content, mimetype pair into a MIME attachment object.
If the mimetype is message/rfc822, content may be an
email.Message or EmailMessage object, as well as a str.
"""
basetype, subtype = mimetype.split('/', 1)
if basetype == 'text':
encoding = self.encoding or settings.DEFAULT_CHARSET
attachment = SafeMIMEText(content, subtype, encoding)
elif basetype == 'message' and subtype == 'rfc822':
# Bug #18967: per RFC2046 s5.2.1, message/rfc822 attachments
# must not be base64 encoded.
if isinstance(content, EmailMessage):
# convert content into an email.Message first
content = content.message()
elif not isinstance(content, Message):
# For compatibility with existing code, parse the message
# into an email.Message object if it is not one already.
content = message_from_string(force_str(content))
attachment = SafeMIMEMessage(content, subtype)
else:
# Encode non-text attachments with base64.
attachment = MIMEBase(basetype, subtype)
attachment.set_payload(content)
Encoders.encode_base64(attachment)
return attachment
def _create_attachment(self, filename, content, mimetype=None):
"""
Convert the filename, content, mimetype triple into a MIME attachment
object.
"""
attachment = self._create_mime_attachment(content, mimetype)
if filename:
try:
filename.encode('ascii')
except UnicodeEncodeError:
filename = ('utf-8', '', filename)
attachment.add_header('Content-Disposition', 'attachment', filename=filename)
return attachment
def _set_list_header_if_not_empty(self, msg, header, values):
"""
Set msg's header, either from self.extra_headers, if present, or from
the values argument.
"""
if values:
try:
value = self.extra_headers[header]
except KeyError:
value = ', '.join(str(v) for v in values)
msg[header] = value
class EmailMultiAlternatives(EmailMessage):
"""
A version of EmailMessage that makes it easy to send multipart/alternative
messages. For example, including text and HTML versions of the text is
made easier.
"""
alternative_subtype = 'alternative'
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, alternatives=None,
cc=None, reply_to=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
"""
super().__init__(
subject, body, from_email, to, bcc, connection, attachments,
headers, cc, reply_to,
)
self.alternatives = alternatives or []
def attach_alternative(self, content, mimetype):
"""Attach an alternative content representation."""
if content is None or mimetype is None:
raise ValueError('Both content and mimetype must be provided.')
self.alternatives.append((content, mimetype))
def _create_message(self, msg):
return self._create_attachments(self._create_alternatives(msg))
def _create_alternatives(self, msg):
encoding = self.encoding or settings.DEFAULT_CHARSET
if self.alternatives:
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for alternative in self.alternatives:
msg.attach(self._create_mime_attachment(*alternative))
return msg
| bsd-3-clause | 3,841,961,167,435,941,400 | 37.339246 | 105 | 0.609739 | false |
JulyKikuAkita/PythonPrac | cs15211/BagofTokens.py | 1 | 2935 | import collections
__source__ = 'https://leetcode.com/problems/bag-of-tokens/'
# Time: O()
# Space: O()
#
# Description: Leetcode # 948. Bag of Tokens
#
# You have an initial power P, an initial score of 0 points,
# and a bag of tokens.
#
# Each token can be used at most once, has a value token[i],
# and has potentially two ways to use it.
#
# If we have at least token[i] power, we may play the token face up,
# losing token[i] power, and gaining 1 point.
# If we have at least 1 point,
# we may play the token face down, gaining token[i] power,
# and losing 1 point.
# Return the largest number of points we can have after playing any number of tokens.
#
# Example 1:
#
# Input: tokens = [100], P = 50
# Output: 0
# Example 2:
#
# Input: tokens = [100,200], P = 150
# Output: 1
# Example 3:
#
# Input: tokens = [100,200,300,400], P = 200
# Output: 2
#
#
# Note:
#
# tokens.length <= 1000
# 0 <= tokens[i] < 10000
# 0 <= P < 10000
#
import unittest
#28ms 83.33%
class Solution(object):
def bagOfTokensScore(self, tokens, P):
"""
:type tokens: List[int]
:type P: int
:rtype: int
"""
tokens.sort()
deque = collections.deque(tokens)
ans = bns = 0
while deque and (P >= deque[0] or bns):
while deque and P >= deque[0]:
P -= deque.popleft()
bns += 1
ans = max(ans, bns)
if deque and bns:
P += deque.pop()
bns -= 1
return ans
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/bag-of-tokens/solution/
Approach 1: Greedy
Complexity Analysis
Time Complexity: O(NlogN), where NN is the length of tokens.
Space Complexity: O(N).
#11ms 29.13%
class Solution {
public int bagOfTokensScore(int[] tokens, int P) {
Arrays.sort(tokens);
int lo = 0, hi = tokens.length - 1;
int points = 0, ans = 0;
while (lo <= hi && (P >= tokens[lo] || points > 0)) {
while (lo <= hi && P >= tokens[lo]) {
P -= tokens[lo++];
points++;
}
ans = Math.max(ans, points);
if (lo <= hi && points > 0) {
P += tokens[hi--];
points--;
}
}
return ans;
}
}
#7ms 99.58%
class Solution {
public int bagOfTokensScore(int[] tokens, int P) {
int left = 0;
int right = tokens.length -1 ;
int tok = 0;
Arrays.sort(tokens);
while(left <= right){
if(P >= tokens[left]){
P -= tokens[left++];
tok++;
} else if (tok > 0 && left != right){
P += tokens[right--];
tok--;
} else
break;
}
return tok;
}
}
''' | apache-2.0 | -2,701,007,191,067,641,000 | 22.677419 | 85 | 0.523339 | false |
jason-neal/companion_simulations | simulators/tcm_module.py | 1 | 7702 | import logging
import os
import numpy as np
import pandas as pd
from logutils import BraceMessage as __
from tqdm import tqdm
import simulators
from mingle.models.broadcasted_models import two_comp_model
from mingle.utilities.chisqr import chi_squared
from mingle.utilities.phoenix_utils import load_starfish_spectrum
from mingle.utilities.simulation_utilities import check_inputs
from simulators.common_setup import setup_dirs, sim_helper_function
from simulators.iam_module import observation_rv_limits
from simulators.iam_module import renormalization
from numpy import ndarray
from typing import Dict, List, Tuple, Union
def setup_tcm_dirs(star: str) -> None:
setup_dirs(star, mode="tcm")
return None
def tcm_helper_function(star: str, obsnum: Union[int, str], chip: int, skip_params: bool = False) -> Tuple[
str, Dict[str, Union[str, float, List[Union[str, float]]]], str]:
return sim_helper_function(star, obsnum, chip, skip_params=skip_params, mode="tcm")
def tcm_analysis(obs_spec, model1_pars, model2_pars, alphas=None, rvs=None,
gammas=None, errors=None, verbose=False, norm=False, save_only=True,
chip=None, prefix=None, wav_scale=True, norm_method="scalar"):
"""Run two component model over all parameter combinations in model1_pars and model2_pars."""
alphas = check_inputs(alphas)
rvs = check_inputs(rvs)
gammas = check_inputs(gammas)
if isinstance(model1_pars, list):
logging.debug(__("Number of close model_pars returned {0}", len(model1_pars)))
if isinstance(model2_pars, list):
logging.debug(__("Number of close model_pars returned {0}", len(model2_pars)))
args = [model2_pars, alphas, rvs, gammas, obs_spec]
kwargs = {"norm": norm, "save_only": save_only, "chip": chip,
"prefix": prefix, "verbose": verbose, "errors": errors,
"wav_scale": wav_scale, "norm_method": norm_method}
broadcast_chisqr_vals = np.empty((len(model1_pars), len(model2_pars)))
for ii, params1 in enumerate(tqdm(model1_pars)):
broadcast_chisqr_vals[ii] = tcm_wrapper(ii, params1, *args, **kwargs)
if save_only:
return None
else:
return broadcast_chisqr_vals # Just output the best value for each model pair
def tcm_wrapper(num, params1, model2_pars, alphas, rvs, gammas, obs_spec,
errors=None, norm=True, verbose=False, save_only=True,
chip=None, prefix=None, wav_scale=True, norm_method="scalar"):
"""Wrapper for iteration loop of tcm. params1 fixed, model2_pars are many."""
normalization_limits = [2105, 2185] # small as possible?
if prefix is None:
sf = os.path.join(simulators.paths["output_dir"], obs_spec.header["OBJECT"].upper(),
"tc_{0}_{1}-{2}_part{6}_host_pars_[{3}_{4}_{5}].csv".format(
obs_spec.header["OBJECT"].upper(), int(obs_spec.header["MJD-OBS"]), chip,
params1[0], params1[1], params1[2], num))
else:
sf = "{0}_part{4}_host_pars_[{1}_{2}_{3}].csv".format(
prefix, params1[0], params1[1], params1[2], num)
save_filename = sf
if os.path.exists(save_filename) and save_only:
print("''{}' exists, so not repeating calculation.".format(save_filename))
return None
else:
if not save_only:
broadcast_chisqr_vals = np.empty(len(model2_pars))
for jj, params2 in enumerate(model2_pars):
if verbose:
print("Starting iteration with parameters:\n {0}={1},{2}={3}".format(num, params1, jj, params2))
mod1_spec = load_starfish_spectrum(params1, limits=normalization_limits, hdr=True,
normalize=True, wav_scale=wav_scale)
mod2_spec = load_starfish_spectrum(params2, limits=normalization_limits, hdr=True,
normalize=True, wav_scale=wav_scale)
# Wavelength selection
rv_limits = observation_rv_limits(obs_spec, rvs, gammas)
mod1_spec.wav_select(*rv_limits)
mod2_spec.wav_select(*rv_limits)
obs_spec = obs_spec.remove_nans()
# One component model with broadcasting over gammas
# two_comp_model(wav, model1, model2, alphas, rvs, gammas)
assert np.allclose(mod1_spec.xaxis, mod2_spec.xaxis)
broadcast_result = two_comp_model(mod1_spec.xaxis, mod1_spec.flux, mod2_spec.flux,
alphas=alphas, rvs=rvs, gammas=gammas)
broadcast_values = broadcast_result(obs_spec.xaxis)
assert ~np.any(np.isnan(obs_spec.flux)), "Observation is nan"
# RE-NORMALIZATION
if chip == 4:
# Quadratically renormalize anyway
obs_spec = renormalization(obs_spec, broadcast_values, normalize=True, method="quadratic")
obs_flux = renormalization(obs_spec, broadcast_values, normalize=norm, method=norm_method)
# sp_chisquare is much faster but don't think I can add masking.
broadcast_chisquare = chi_squared(obs_flux, broadcast_values, error=errors)
# sp_chisquare = stats.chisquare(obs_flux, broadcast_values, axis=0).statistic
# broadcast_chisquare = sp_chisquare
if not save_only:
print(broadcast_chisquare.shape)
print(broadcast_chisquare.ravel()[np.argmin(broadcast_chisquare)])
broadcast_chisqr_vals[jj] = broadcast_chisquare.ravel()[np.argmin(broadcast_chisquare)]
npix = obs_flux.shape[0]
save_full_tcm_chisqr(save_filename, params1, params2, alphas, rvs, gammas, broadcast_chisquare, npix,
verbose=verbose)
if save_only:
return None
else:
return broadcast_chisqr_vals
def save_full_tcm_chisqr(filename: str, params1: List[Union[int, float]], params2: List[Union[int, float]],
alphas: ndarray, rvs: ndarray, gammas: ndarray, broadcast_chisquare: ndarray, npix: int,
verbose: bool = False) -> None:
"""Save the iterations chisqr values to a cvs."""
a_grid, r_grid, g_grid = np.meshgrid(alphas, rvs, gammas, indexing='ij')
assert a_grid.shape == r_grid.shape
assert r_grid.shape == g_grid.shape
assert g_grid.shape == broadcast_chisquare.shape
data = {"alpha": a_grid.ravel(), "rv": r_grid.ravel(), "gamma": g_grid.ravel(),
"chi2": broadcast_chisquare.ravel()}
columns = ["alpha", "rv", "gamma", "chi2"]
len_c = len(columns)
df = pd.DataFrame(data=data, columns=columns)
for par, value in zip(["teff_2", "logg_2", "feh_2"], params2):
df[par] = value
columns = ["teff_2", "logg_2", "feh_2"] + columns
if "[{}_{}_{}]".format(params1[0], params1[1], params1[2]) not in filename:
for par, value in zip(["teff_1", "logg_1", "feh_1"], params1):
df[par] = value
columns = ["teff_1", "logg_1", "feh_1"] + columns
df["npix"] = npix
columns = columns[:-len_c] + ["npix"] + columns[-len_c:]
df = df.round(decimals={"logg_2": 1, "feh_2": 1, "alpha": 4,
"rv": 3, "gamma": 3, "chi2": 4})
exists = os.path.exists(filename)
if exists:
df[columns].to_csv(filename, sep=',', mode="a", index=False, header=False)
else:
# Add header at the top only
df[columns].to_csv(filename, sep=',', mode="a", index=False, header=True)
if verbose:
print("Saved chi2 values to {}".format(filename))
return None
| mit | 396,135,552,089,401,860 | 42.514124 | 113 | 0.611529 | false |
ATRAN2/fureon | backend/fureon/exceptions.py | 1 | 1628 | from fureon.utils.logger import main_logger
class ExceptionWithLogger(Exception):
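    # Base exception that writes its message to the supplied logger as soon as it is instantiated.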
def __init__(self, message, logger, level='warning'):
super(ExceptionWithLogger, self).__init__(message)
exception_name = self.__class__.__name__
logger_message = u'{0}: {1}'.format(exception_name, message)
logger_function = getattr(logger, level)
logger_function(logger_message)
class FileNotFoundError(ExceptionWithLogger):
def __init__(self, message='', logger=main_logger):
super(FileNotFoundError, self).__init__(message, logger)
class FileTypeError(ExceptionWithLogger):
def __init__(self, message='', logger=main_logger):
super(FileTypeError, self).__init__(message, logger)
class DuplicateEntryError(ExceptionWithLogger):
def __init__(self, message='', logger=main_logger, level='info'):
super(DuplicateEntryError, self).__init__(message, logger, level)
class InvalidUsernameError(ExceptionWithLogger):
def __init__(self, message='', logger=main_logger):
super(InvalidUsernameError, self).__init__(message, logger)
class InvalidEmailError(ExceptionWithLogger):
def __init__(self, message='', logger=main_logger):
super(InvalidEmailError, self).__init__(message, logger)
class DuplicateUsernameError(ExceptionWithLogger):
def __init__(self, message='', logger=main_logger):
super(DuplicateUsernameError, self).__init__(message, logger)
class DuplicateEmailError(ExceptionWithLogger):
def __init__(self, message='', logger=main_logger):
super(DuplicateEmailError, self).__init__(message, logger)
| apache-2.0 | -6,531,733,261,743,192,000 | 35.177778 | 73 | 0.692875 | false |
richrd/bx | logger.py | 1 | 2699 | # -*- coding: utf-8 -*-
"""
Logging capabilities for the IRC Bot.
"""
import time
from helpers import *
LOG_NORMAL = "norm"
LOG_INFO = "info"
LOG_WARNING = "warn"
LOG_ERROR = "error"
class colors:
BLACK = '\033[30m'
GRAY = '\033[37m'
GREEN = '\033[32m'
MAGENTA = '\033[95m'
VIOLET = '\033[95m'
CYAN = '\033[96m'
BLUE = '\033[94m'
DARKBLUE = '\033[34m'
OKGREEN = '\033[92m'
YELLOW = '\033[93m'
ORANGE = '\033[33m'
RED = '\033[91m'
DARKRED = '\033[31m'
ENDC = '\033[0m'
class Logger:
def __init__(self, bot):
self.bot = bot
self.logs = []
self.show_domains = ["irc", "bot", "mod", "exc", "win", "user", "config"]
self.domain_colors = {
"irc": colors.DARKBLUE,
"bot": None,
"mod": colors.YELLOW,
"exc": colors.RED,
"win": colors.OKGREEN,
"user": colors.MAGENTA,
}
self.show_types = [LOG_NORMAL, LOG_INFO, LOG_WARNING, LOG_ERROR]
self.type_colors = {
LOG_NORMAL: colors.BLUE,
LOG_INFO: colors.GREEN,
LOG_WARNING: colors.YELLOW,
LOG_ERROR: colors.RED,
}
def Loaded(self):
self.show_domains = self.bot.config["log_domains"]
def ColorText(self, text, color):
return color + text + colors.ENDC
def RenderAll(self):
for entry in self.logs:
if entry[1] in self.show_types and entry[2] in self.show_domains:
self.RenderLine(entry)
def RenderLine(self, log_entry):
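        # log_entry fields: [timestamp, type, domain, message, color] (see Log())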
line = time_stamp_short(log_entry[0])+" "
type_color = self.type_colors[log_entry[1]]
line += self.ColorText("[" + log_entry[1][0].upper() + "] ", type_color)
domain = log_entry[2]
log_str = log_entry[3]
color = log_entry[4]
if not color and domain in self.domain_colors.keys():
color = self.domain_colors[domain]
if color:
line = line + color + log_str + colors.ENDC
else:
line = line + log_str
print line
def Log(self, domain, s, color=None, logtype = LOG_NORMAL):
log_entry = [time.time(), logtype, domain, s, color]
self.logs.append(log_entry)
# print log_entry
if log_entry[1] in self.show_types and log_entry[2] in self.show_domains:
self.RenderLine(log_entry)
def Info(self, domain, s, color=None):
self.Log(domain, s, color, LOG_INFO)
def Warning(self, domain, s, color=None):
self.Log(domain, s, color, LOG_WARNING)
def Error(self, domain, s, color=colors.RED):
self.Log(domain, s, color, LOG_ERROR)
| apache-2.0 | -427,284,577,334,323,100 | 27.114583 | 81 | 0.545758 | false |
psesh/Effective-Quadratures | equadratures/distributions/rayleigh.py | 1 | 3285 | """The Rayleigh distribution."""
from equadratures.distributions.template import Distribution
import numpy as np
from scipy.stats import rayleigh
RECURRENCE_PDF_SAMPLES = 8000
class Rayleigh(Distribution):
"""
The class defines a Rayleigh object. It is the child of Distribution.
:param double scale:
Scale parameter of the Rayleigh distribution.
"""
def __init__(self, scale):
self.scale = scale
self.bounds = np.array([0.0, np.inf])
if self.scale is not None:
if self.scale > 0:
self.mean = self.scale * np.sqrt(np.pi / 2.0)
self.variance = self.scale**2 * (4.0 - np.pi)/ 2.0
self.skewness = 2.0 * np.sqrt(np.pi) * (np.pi - 3.0) / ((4.0 - np.pi)**(1.5))
                self.kurtosis = -(6 * np.pi**2 - 24 * np.pi + 16.0) / ((4 - np.pi)**2) + 3.0  # excess kurtosis of the Rayleigh distribution plus 3
self.x_range_for_pdf = np.linspace(0.0, 8.0 * self.scale, RECURRENCE_PDF_SAMPLES)
def get_icdf(self, xx):
"""
A Rayleigh inverse cumulative density function.
:param Rayleigh self:
An instance of the Rayleigh class.
:param array xx:
Points at which the inverse cumulative density function needs to be evaluated.
:return:
Inverse cumulative density function values of the Rayleigh distribution.
"""
return rayleigh.ppf(xx, loc=0, scale=self.scale)
def get_description(self):
"""
A description of the Rayleigh distribution.
:param Rayleigh self:
An instance of the Rayleigh class.
:return:
A string describing the Rayleigh distribution.
"""
text = "is a Rayleigh distribution; characterised by its scale parameter, which has been set to "+str(self.scale)+"."
return text
def get_pdf(self, points=None):
"""
A Rayleigh probability density function.
:param Rayleigh self:
An instance of the Rayleigh class.
:param array points:
Points at which the PDF needs to be evaluated.
:return:
Probability density values along the support of the Rayleigh distribution.
"""
return rayleigh.pdf(points, loc=0, scale=self.scale )
def get_cdf(self, points=None):
"""
A Rayleigh cumulative density function.
:param Rayleigh self:
An instance of the Rayleigh class.
:param array points:
Points at which the CDF needs to be evaluated.
:return:
Cumulative density values along the support of the Rayleigh distribution.
"""
return rayleigh.cdf(points, loc=0, scale=self.scale )
def get_samples(self, m=None):
"""
Generates samples from the Rayleigh distribution.
:param rayleigh self:
An instance of the Rayleigh class.
:param integer m:
Number of random samples. If no value is provided, a default of 5e5 is assumed.
:return:
A N-by-1 vector that contains the samples.
"""
if m is not None:
number = m
else:
number = 500000
return rayleigh.rvs(loc=0.0, scale=self.scale, size=number, random_state=None)
| mit | -6,373,363,622,737,242,000 | 35.098901 | 125 | 0.591781 | false |
troeger/opensubmit | web/opensubmit/management/commands/dumpconfig.py | 1 | 1352 | from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User, Group
from opensubmit.signalhandlers import check_permission_system
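# Settings keys that are skipped when dumping (framework plumbing rather than site configuration)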
skiplist = ['AUTHENTICATION_BACKENDS', 'EMAIL_BACKEND', 'LOG_FILE',
'FORCE_SCRIPT_NAME', 'GRAPPELLI_ADMIN_TITLE', 'GRAPPELLI_INDEX_DASHBOARD',
'GRAPPELLI_SWITCH_USER', 'INSTALLED_APPS', 'MIDDLEWARE_CLASSES',
'NOT_CONFIGURED_VALUE', 'ROOT_URLCONF', 'SOCIAL_AUTH_PIPELINE',
'SOCIAL_AUTH_URL_NAMESPACE', 'STATICFILES_FINDERS', 'TEMPLATES', 'TEST_RUNNER']
class Command(BaseCommand):
help = 'Dumps effective configuration after config file parsing.'
def handle(self, *args, **options):
import opensubmit.settings as s
for name in dir(s):
            if name == "DATABASES":
value = getattr(s, name)
print("DATABASE: {0}".format(value['default']))
            elif name == "LOGGING":
value = getattr(s, name)
print("LOGGING: {0}".format(value['handlers']))
elif name.isupper() and name not in skiplist:
if "SECRET" in name:
print("{0}: {1}".format(name, "............." + getattr(s, name)[-3:]))
else:
print("{0}: {1}".format(name, getattr(s, name)))
| agpl-3.0 | 5,413,204,271,889,965,000 | 47.285714 | 91 | 0.590976 | false |
JaneliaSciComp/osgpyplusplus | examples/osggraphicscost.py | 1 | 2179 | #!/bin/env python
# This is a C++ example from the OpenSceneGraph source code, converted to python
# /* OpenSceneGraph example, osgterrain.
# *
# * Permission is hereby granted, free of charge, to any person obtaining a copy
# * of this software and associated documentation files (the "Software"), to deal
# * in the Software without restriction, including without limitation the rights
# * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# * copies of the Software, and to permit persons to whom the Software is
# * furnished to do so, subject to the following conditions:
# *
# * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# * THE SOFTWARE.
# */
from osgpypp import osg, osgDB, osgViewer
import sys
class CalibrateCostEstimator(osg.GraphicsOperation):
def __init__(self, gce):
osg.GraphicsOperation.__init__(self, osg.GraphicsOperation("CalibrateCostEstimator", False))
self._gce = gce
def __call__(self, context):
renderInfo = osg.RenderInfo(context.getState(), 0)
self._gce.calibrate(renderInfo)
def main(argv):
arguments = osg.ArgumentParser(argv)
# construct the viewer.
viewer = osgViewer.Viewer(arguments)
node = osgDB.readNodeFiles(arguments)
if node is None:
sys.exit(0)
gce = osg.GraphicsCostEstimator()
viewer.setSceneData(node)
viewer.realize()
compileCost = gce.estimateCompileCost(node)
drawCost = gce.estimateDrawCost(node)
print "estimateCompileCost(", node.getName(), "), CPU=", compileCost.first, " GPU=", compileCost.second
print "estimateDrawCost(", node.getName(), "), CPU=", drawCost.first, " GPU=", drawCost.second
return viewer.run()
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause | -790,878,742,279,204,200 | 38.618182 | 107 | 0.708123 | false |
ricmoo/pycoind | pycoind/util/key.py | 1 | 3854 | # The MIT License (MIT)
#
# Copyright (c) 2014 Richard Moore
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from . import base58
from .ecdsa.ecdsa import point_is_valid
from .ecdsa import SECP256k1 as curve
from .ecdsa.numbertheory import square_root_mod_prime
from .ecdsa.util import number_to_string, string_to_number
from .hash import hash160
__all__ = [
'compress_public_key', 'decompress_public_key',
'privkey_to_wif', 'privkey_from_wif',
'publickey_to_address'
]
def compress_public_key(public_key):
if public_key[0] != chr(0x04) or len(public_key) != 65:
raise ValueError('invalid uncompressed public key')
y_parity = string_to_number(public_key[33:65])
return chr(0x02 + (y_parity & 0x01)) + public_key[1:33]
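# secp256k1 curve parameters, cached at module level for use in decompress_public_key()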
_a = curve.curve.a()
_b = curve.curve.b()
_p = curve.curve.p()
_n = curve.order
def decompress_public_key(public_key):
if public_key[0] == chr(0x04) and len(public_key) == 65:
x = string_to_number(public_key[1:33])
y = string_to_number(public_key[33:65])
if not point_is_valid(curve.generator, x, y):
raise ValueError('invalid public key')
return public_key
if public_key[0] not in (chr(0x02), chr(0x03)) or len(public_key) != 33:
raise ValueError('invalid compressed public key')
x = string_to_number(public_key[1:])
y = square_root_mod_prime((x ** 3 + _a * x + _b) % _p, _p)
if not point_is_valid(curve.generator, x, y):
raise ValueError('invalid public key')
if (ord(public_key[0]) & 0x01) != (y & 0x01):
y = _p - y
return chr(0x04) + public_key[1:] + number_to_string(y, _n)
# See: https://en.bitcoin.it/wiki/Wallet_import_format
def privkey_to_wif(privkey, prefix = chr(0x80)):
return base58.encode_check(prefix + privkey)
# See: https://en.bitcoin.it/wiki/Wallet_import_format
def privkey_from_wif(privkey, prefix = chr(0x80)):
key = base58.decode_check(privkey)
if prefix != key[0]:
raise ValueError('wif private key has does not match prefix')
if len(key) == 33:
if privkey[0] != '5':
raise ValueError('uncompressed wif private key does not begin with 5')
return key[1:]
elif len(key) == 34:
if key[-1] != chr(0x01):
raise ValueError('compressed wif private key missing compression bit')
if privkey[0] not in ('L', 'K'):
            raise ValueError('compressed wif private key does not begin with L or K')
return key[1:-1]
raise ValueError('invalid wif private key')
def pubkeyhash_to_address(publickey_hash, version = chr(0)):
return base58.encode_check(version + publickey_hash)
# See: https://en.bitcoin.it/wiki/Technical_background_of_Bitcoin_addresses
def publickey_to_address(publickey, version = chr(0)):
return pubkeyhash_to_address(hash160(publickey), version)
| mit | -1,387,571,464,477,286,000 | 37.54 | 82 | 0.6863 | false |
stevearc/dynamo3 | dynamo3/exception.py | 1 | 1948 | """ Exceptions and exception logic for DynamoDBConnection """
import sys
from pprint import pformat
import botocore
class DynamoDBError(botocore.exceptions.BotoCoreError):
""" Base error that we get back from Dynamo """
fmt = "{Code}: {Message}\nArgs: {args}"
def __init__(self, status_code, exc_info=None, **kwargs):
self.exc_info = exc_info
self.status_code = status_code
super(DynamoDBError, self).__init__(**kwargs)
def re_raise(self):
""" Raise this exception with the original traceback """
if self.exc_info is not None:
traceback = self.exc_info[2]
if self.__traceback__ != traceback:
raise self.with_traceback(traceback)
raise self
class ConditionalCheckFailedException(DynamoDBError):
""" Raised when an item field value fails the expected value check """
fmt = "{Code}: {Message}"
CheckFailed = ConditionalCheckFailedException
class TransactionCanceledException(DynamoDBError):
""" Raised when a transaction fails """
fmt = "{Code}: {Message}"
class ProvisionedThroughputExceededException(DynamoDBError):
""" Raised when an item field value fails the expected value check """
fmt = "{Code}: {Message}"
ThroughputException = ProvisionedThroughputExceededException
EXC = {
"ConditionalCheckFailedException": ConditionalCheckFailedException,
"ProvisionedThroughputExceededException": ThroughputException,
"TransactionCanceledException": TransactionCanceledException,
}
def translate_exception(exc, kwargs):
""" Translate a botocore.exceptions.ClientError into a dynamo3 error """
error = exc.response["Error"]
error.setdefault("Message", "")
err_class = EXC.get(error["Code"], DynamoDBError)
return err_class(
exc.response["ResponseMetadata"]["HTTPStatusCode"],
exc_info=sys.exc_info(),
args=pformat(kwargs),
**error
)
| mit | 195,750,149,695,767,780 | 26.43662 | 76 | 0.684292 | false |
kodiful/plugin.video.garapon.tv | resources/lib/item.py | 1 | 5427 | # -*- coding: utf-8 -*-
import datetime
import time
import re
import os
import sys
import io
from urllib.parse import urlencode
from PIL import Image
from sqlite3 import dbapi2 as sqlite
from resources.lib.common import Common
from resources.lib.genre import Genre
from resources.lib.request import Request
from resources.lib.downloader import Downloader
class Item():
def __init__(self, item, onair=False):
        # On-air status
self.onair = onair
        # Copy the JSON object
self.item = item
        # Override with additional information
gtvid = self.item['gtvid']
self.item['_summary'] = {
'title': self.item['title'],
'url': Request().content_url(gtvid),
'date': self.item['startdate'],
'description': self.item['description'],
'source': self.item['bc'],
'category': self.genre(),
'duration': self.duration(),
'thumbnail': Request().thumbnail_url(gtvid),
'thumbfile': self.thumbnail(),
'contentid': gtvid,
}
        # Context menu
self.contextmenu = self.get_contextmenu()
def title(self):
if self.onair:
try:
t = datetime.datetime.strptime(self.item['startdate'], '%Y-%m-%d %H:%M:%S')
except TypeError:
t = datetime.datetime.fromtimestamp(time.mktime(time.strptime(self.item['startdate'], '%Y-%m-%d %H:%M:%S')))
sdate = t.strftime('%H:%M')
s = time.strptime(self.item['duration'], '%H:%M:%S')
s = t + datetime.timedelta(hours=s.tm_hour, minutes=s.tm_min, seconds=s.tm_sec)
edate = s.strftime('%H:%M')
title = '%s [COLOR khaki]▶ %s (%s〜%s)[/COLOR]' % (self.item['bc'], self.item['title'], sdate, edate)
else:
title = self.item['title']
return title
def date(self):
match = re.search('^([0-9]{4})-([0-9]{2})-([0-9]{2})', self.item['startdate'])
date = '%s.%s.%s' % (match.group(3), match.group(2), match.group(1))
return date
def duration(self):
if self.onair:
duration = ''
else:
match = re.search('^([0-9]{2,}):([0-9]{2}):([0-9]{2})', self.item['duration'])
duration = '%d' % (int(match.group(1)) * 3600 + int(match.group(2)) * 60 + int(match.group(2)))
return duration
def genre(self):
if self.item['genre'] is None:
return ''
else:
buf = []
for item1 in self.item['genre']:
(id0, id1) = item1.split('/')
genre = Genre().search(id0, id1)
if genre['name1']:
buf.append(genre['name1'])
elif genre['name0']:
buf.append(genre['name0'])
return ', '.join(buf)
def thumbnail(self):
imagefile = os.path.join(Common.CACHE_PATH, '%s.png' % self.item['gtvid'])
if os.path.isfile(imagefile) and os.path.getsize(imagefile) < 1000:
# delete imagefile
os.remove(imagefile)
# delete from database
conn = sqlite.connect(Common.CACHE_DB)
c = conn.cursor()
# c.execute("SELECT cachedurl FROM texture WHERE url = '%s';" % imagefile)
c.execute("DELETE FROM texture WHERE url = '%s';" % imagefile)
conn.commit()
conn.close()
if os.path.isfile(imagefile):
pass
else:
buffer = Request().thumbnail(gtvid=self.item['gtvid'])
image = Image.open(io.BytesIO(buffer)) # 320x180
image = image.resize((216, 122))
background = Image.new('RGB', (216, 216), (0, 0, 0))
background.paste(image, (0, 47))
background.save(imagefile, 'PNG')
return imagefile
    # Context menu
def get_contextmenu(self):
gtvid = self.item['gtvid']
title = self.item['title']
menu = []
        # Detailed information
menu.append((Common.STR(30906), 'Action(Info)'))
        # Add to smart list
try:
if self.item['genre'][0]:
genre = self.item['genre'][0].split('/')
else:
genre = ['', '']
except Exception:
genre = ['', '']
args = {'mode': 'beginEditSmartList', 'name': title, 'ch': self.item['ch'], 'g0': genre[0], 'g1': genre[1]}
menu.append((Common.STR(30903), 'RunPlugin(%s?%s)' % (sys.argv[0], urlencode(args))))
        # Add to favorites
if self.item['favorite'] == '0':
# add
args = {'mode': 'switchFavorites', 'name': title, 'url': urlencode({'gtvid': gtvid, 'rank': 1})}
menu.append((Common.STR(30925), 'RunPlugin(%s?%s)' % (sys.argv[0], urlencode(args))))
else:
# delete
args = {'mode': 'switchFavorites', 'name': title, 'url': urlencode({'gtvid': gtvid, 'rank': 0})}
menu.append((Common.STR(30926), 'RunPlugin(%s?%s)' % (sys.argv[0], urlencode(args))))
        # Add to downloads
menu += Downloader().contextmenu(self.item, Request().content_url(gtvid))
        # Return to top
menu.append((Common.STR(30936), 'Container.Update(%s,replace)' % (sys.argv[0])))
return menu
| gpl-2.0 | -6,783,332,626,776,033,000 | 36.464286 | 124 | 0.523546 | false |
michaeljoseph/cookiecutter | cookiecutter/utils.py | 1 | 2734 | # -*- coding: utf-8 -*-
"""Helper functions used throughout Cookiecutter."""
from __future__ import unicode_literals
import contextlib
import errno
import logging
import os
import stat
import shutil
import sys
from cookiecutter.prompt import read_user_yes_no
logger = logging.getLogger(__name__)
def force_delete(func, path, exc_info):
"""Error handler for `shutil.rmtree()` equivalent to `rm -rf`.
Usage: `shutil.rmtree(path, onerror=force_delete)`
From stackoverflow.com/questions/1889597
"""
os.chmod(path, stat.S_IWRITE)
func(path)
def rmtree(path):
"""Remove a directory and all its contents. Like rm -rf on Unix.
:param path: A directory path.
"""
shutil.rmtree(path, onerror=force_delete)
def make_sure_path_exists(path):
"""Ensure that a directory exists.
:param path: A directory path.
"""
logger.debug('Making sure path exists: %s', path)
try:
os.makedirs(path)
logger.debug('Created directory at: %s', path)
except OSError as exception:
if exception.errno != errno.EEXIST:
return False
return True
@contextlib.contextmanager
def work_in(dirname=None):
"""Context manager version of os.chdir.
When exited, returns to the working directory prior to entering.
"""
curdir = os.getcwd()
try:
if dirname is not None:
os.chdir(dirname)
yield
finally:
os.chdir(curdir)
def make_executable(script_path):
"""Make `script_path` executable.
:param script_path: The file to change
"""
status = os.stat(script_path)
os.chmod(script_path, status.st_mode | stat.S_IEXEC)
def prompt_and_delete(path, no_input=False):
"""
Ask user if it's okay to delete the previously-downloaded file/directory.
If yes, delete it. If no, checks to see if the old version should be
reused. If yes, it's reused; otherwise, Cookiecutter exits.
:param path: Previously downloaded zipfile.
:param no_input: Suppress prompt to delete repo and just delete it.
:return: True if the content was deleted
"""
# Suppress prompt if called via API
if no_input:
ok_to_delete = True
else:
question = (
"You've downloaded {} before. " "Is it okay to delete and re-download it?"
).format(path)
ok_to_delete = read_user_yes_no(question, 'yes')
if ok_to_delete:
if os.path.isdir(path):
rmtree(path)
else:
os.remove(path)
return True
else:
ok_to_reuse = read_user_yes_no(
"Do you want to re-use the existing version?", 'yes'
)
if ok_to_reuse:
return False
sys.exit()
| bsd-3-clause | 5,201,034,186,949,158,000 | 23.630631 | 86 | 0.631309 | false |
teddy-michel/Mimir | movies/urls.py | 1 | 5440 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r"^$", views.index, name="movies_home"),
url(r"^movies$", views.moviesList, name="movies_list"),
url(r"^movies/create$", views.movieCreate, name="movie_create"),
url(r"^movies/import$", views.movieImport, name="movie_import"),
url(r"^movies/api/list$", views.moviesAPIList, name="movies_api_list"),
url(r"^movies/api/search$", views.moviesAPISearch, name="movies_api_search"),
url(r"^movies/api/import/(?P<imdb>[0-9]+)$", views.moviesAPIImport, name="movie_api_import"),
url(r"^movies/(?P<id>[0-9]+)$", views.movieInfos, name="movie_infos"),
url(r"^movies/(?P<id>[0-9]+)/edit$", views.movieEdit, name="movie_edit"),
url(r"^movies/(?P<id>[0-9]+)/delete$", views.movieDelete, name="movie_delete"),
url(r"^movies/(?P<id>[0-9]+)/add_view$", views.movieAddView, name="movie_add_view"),
url(r"^movies/(?P<id>[0-9]+)/add_actor$", views.movieAddActor, name="movie_add_actor"),
url(r"^movies/(?P<id>[0-9]+)/add_actordubbing$", views.movieAddActorDubbing, name="movie_add_actordubbing"),
url(r"^movies/(?P<id>[0-9]+)/add_crewmember$", views.movieAddCrewMember, name="movie_add_crewmember"),
url(r"^movies/(?P<id>[0-9]+)/add_ref$", views.movieAddRef, name="movie_add_ref"),
url(r"^movies/(?P<id>[0-9]+)/add_tag$", views.movieAddTag, name="movie_add_tag"),
url(r"^movies/(?P<id>[0-9]+)/add_attribute$", views.movieAddAttribute, name="movie_add_attribute"),
url(r"^movies/(?P<id>[0-9]+)/add_link$", views.movieAddLink, name="movie_add_link"),
url(r"^movies/(?P<id>[0-9]+)/edit_views$", views.movieEditViews, name="movie_edit_views"),
url(r"^movies/(?P<id>[0-9]+)/edit_actors$", views.movieEditActors, name="movie_edit_actors"),
url(r"^movies/(?P<id>[0-9]+)/edit_actordubbings$", views.movieEditActorsDubbing, name="movie_edit_actorsdubbing"),
url(r"^movies/(?P<id>[0-9]+)/edit_crew$", views.movieEditCrew, name="movie_edit_crew"),
url(r"^movies/(?P<id>[0-9]+)/edit_refs$", views.movieEditRefs, name="movie_edit_refs"),
url(r"^movies/(?P<id>[0-9]+)/edit_tags$", views.movieEditTags, name="movie_edit_tags"),
url(r"^movies/(?P<id>[0-9]+)/edit_attributes$", views.movieEditAttributes, name="movie_edit_attributes"),
url(r"^movies/(?P<id>[0-9]+)/edit_links$", views.movieEditLinks, name="movie_edit_links"),
url(r"^movies/(?P<id>[0-9]+)/import_actors$", views.movieImportActors, name="movie_import_actors"),
url(r"^series$", views.seriesList, name="series_list"),
url(r"^series/create$", views.serieCreate, name="serie_create"),
url(r"^series/import$", views.serieImport, name="serie_import"),
url(r"^series/api/list$", views.seriesAPIList, name="series_api_list"),
url(r"^series/api/search$", views.seriesAPISearch, name="series_api_search"),
url(r"^series/(?P<id>[0-9]+)$", views.serieInfos, name="serie_infos"),
url(r"^series/(?P<id>[0-9]+)/edit$", views.serieEdit, name="serie_edit"),
url(r"^series/(?P<id>[0-9]+)/delete$", views.serieDelete, name="serie_delete"),
url(r"^series/(?P<id>[0-9]+)/add_crewmember$", views.serieAddCrewMember, name="serie_add_crewmember"),
url(r"^series/(?P<id>[0-9]+)/add_tag$", views.serieAddTag, name="serie_add_tag"),
url(r"^series/(?P<id>[0-9]+)/add_attribute$", views.serieAddAttribute, name="serie_add_attribute"),
url(r"^series/(?P<id>[0-9]+)/add_link$", views.serieAddLink, name="serie_add_link"),
url(r"^series/(?P<id>[0-9]+)/add_season$", views.serieAddSeason, name="serie_add_season"),
url(r"^series/(?P<id>[0-9]+)/edit_crew$", views.serieEditCrew, name="serie_edit_crew"),
url(r"^series/(?P<id>[0-9]+)/edit_tags$", views.serieEditTags, name="serie_edit_tags"),
url(r"^series/(?P<id>[0-9]+)/edit_attributes$", views.serieEditAttributes, name="serie_edit_attributes"),
url(r"^series/(?P<id>[0-9]+)/edit_links$", views.serieEditLinks, name="serie_edit_links"),
url(r"^series/(?P<id>[0-9]+)/edit_seasons$", views.serieEditSeasons, name="serie_edit_seasons"),
#url(r"^series/(?P<id>[0-9]+)/(?P<sid>[0-9]+)/edit$", views.seasonEdit, name="season_edit"),
url(r"^series/(?P<id>[0-9]+)/(?P<sid>[0-9]+)/add_episode$", views.seasonAddEpisode, name="season_add_episode"),
url(r"^series/(?P<id>[0-9]+)/(?P<sid>[0-9]+)/create_episode$", views.seasonCreateEpisode, name="season_create_episode"),
url(r"^series/(?P<id>[0-9]+)/(?P<sid>[0-9]+)/edit_episodes$", views.seasonEditEpisodes, name="season_edit_episodes"),
url(r"^sagas$", views.sagasList, name="sagas_list"),
url(r"^sagas/create$", views.sagaCreate, name="saga_create"),
url(r"^sagas/(?P<id>[0-9]+)$", views.sagaInfos, name="saga_infos"),
url(r"^sagas/(?P<id>[0-9]+)/edit$", views.sagaEdit, name="saga_edit"),
url(r"^sagas/(?P<id>[0-9]+)/delete$", views.sagaDelete, name="saga_delete"),
url(r"^sagas/(?P<id>[0-9]+)/add_movie$", views.sagaAddMovie, name="saga_add_movie"),
url(r"^sagas/(?P<id>[0-9]+)/edit_movies$", views.sagaEditMovies, name="saga_edit_movies"),
url(r"^year-(?P<year>[0-9]{4})$", views.moviesYear, name="movies_year"),
url(r"^search$", views.moviesSearch, name="movies_search"),
url(r"^stats$", views.stats, name="movies_stats"),
url(r"^views$", views.viewsStats, name="movies_views_stats"),
url(r"^views/export$", views.viewsExport, name="movies_views_export"),
url(r"^importV1$", views.importFromV1, name="import_v1"),
]
| gpl-3.0 | -7,166,531,940,393,566,000 | 72.513514 | 124 | 0.644118 | false |
ToAruShiroiNeko/revscoring | revscoring/features/feature.py | 1 | 7513 | """
.. autoclass:: revscoring.Feature
:members:
"""
from math import log as math_log
from ..dependencies import Dependent
# Sets up refences to overloaded function names
math_max = max
math_min = min
class Feature(Dependent):
"""
Represents a predictive feature.
:Parameters:
name : str
The name of the feature
process : `func`
A function that will generate a feature value
        returns : `type`
A type to compare the return of this function to.
        depends_on : `list` ( `hashable` )
An ordered list of dependencies that correspond
to the `*args` of `process`
"""
def __init__(self, name, process=None, *, returns=None, depends_on=None):
super().__init__(name, process, depends_on)
self.returns = returns
def __call__(self, *args, **kwargs):
value = super().__call__(*args, **kwargs)
if __debug__:
return self.validate(value)
else:
return value
def __hash__(self):
return hash(('feature', self.name))
# Binary math
def __add__(self, summand):
return add(self, summand)
def __sub__(self, subband):
return sub(self, subband)
def __truediv__(self, divisor):
return div(self, divisor)
def __mul__(self, multiplier):
return mul(self, multiplier)
# Comparison
def __lt__(self, other):
return lt(self, other)
def __le__(self, other):
return le(self, other)
def __eq__(self, other):
return eq(self, other)
def __ne__(self, other):
return ne(self, other)
def __gt__(self, other):
return gt(self, other)
def __ge__(self, other):
return ge(self, other)
def validate(self, value):
if isinstance(value, self.returns):
return value
else:
raise ValueError("Expected {0}, but got {1} instead."
.format(self.returns, type(value)))
@classmethod
def or_constant(self, val):
if isinstance(val, Feature):
return val
else:
return Constant(val)
class Constant(Feature):
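    # Wraps a plain value as a Feature; used by Feature.or_constant() so constants can appear in feature expressions.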
def __init__(self, value):
self.value = value
super().__init__(str(value), self._process,
returns=type(value), depends_on=[])
def _process(self):
return self.value
class Modifier(Feature):
pass
class BinaryOperator(Modifier):
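    # Subclasses set CHAR to the operator symbol used when composing the combined feature's name.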
CHAR = "?"
def __init__(self, left, right, returns=None):
left = Feature.or_constant(left)
right = Feature.or_constant(right)
name = "({0} {1} {2})".format(left.name, self.CHAR, right.name)
if returns is None:
returns = type(self.operate(left.returns(), right.returns()))
super().__init__(name, self.operate, returns=returns,
depends_on=[left, right])
def operate(self, left, right):
raise NotImplementedError()
class add(BinaryOperator):
"""
Generates a feature that represents the addition of
two :class:`revscoring.Feature` or constant values.
"""
CHAR = "+"
def operate(self, left, right):
return left + right
class sub(BinaryOperator):
"""
Generates a feature that represents the subtraction of
two :class:`revscoring.Feature` or constant values.
"""
CHAR = "-"
def operate(self, left, right):
return left - right
class mul(BinaryOperator):
"""
    Generates a feature that represents the multiplication of
two :class:`revscoring.Feature` or constant values.
"""
CHAR = "*"
def operate(self, left, right):
return left * right
class div(BinaryOperator):
"""
Generates a feature that represents the division of
two :class:`revscoring.Feature` or constant values.
"""
CHAR = "/"
def __init__(self, left, right):
# Explicitly setting return type to float.
super().__init__(left, right, returns=float)
def operate(self, left, right):
return left / right
class Comparison(BinaryOperator):
def __init__(self, left, right):
# Explicitly setting return type to boolean.
super().__init__(left, right, returns=bool)
class gt(Comparison):
"""
Generates a feature that represents the greater-than relationship of
two :class:`revscoring.Feature` or constant values.
"""
CHAR = ">"
def operate(self, left, right):
return left > right
class lt(Comparison):
"""
Generates a feature that represents the less-than relationship of
two :class:`revscoring.Feature` or constant values.
"""
CHAR = "<"
def operate(self, left, right):
return left < right
class ge(Comparison):
"""
Generates a feature that represents the greater-than-or-equal relationship
of two :class:`revscoring.Feature` or constant values.
"""
CHAR = ">="
def operate(self, left, right):
return left >= right
class le(Comparison):
"""
Generates a feature that represents the less-than-or-equal relationship of
two :class:`revscoring.Feature` or constant values.
"""
CHAR = "<="
def operate(self, left, right):
return left <= right
class eq(Comparison):
"""
Generates a feature that represents the equality of two
:class:`revscoring.Feature` or constant values.
"""
CHAR = "=="
def operate(self, left, right):
return left == right
class ne(Comparison):
"""
Generates a feature that represents the inequality of two
:class:`revscoring.Feature` or constant values.
"""
CHAR = "!="
def operate(self, left, right):
return left != right
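# Illustrative sketch (not part of the original module): because of the
# operator overloads on Feature, arithmetic and comparisons build new derived
# features instead of computing values immediately. The feature names below
# are hypothetical.
#
#   chars_added = Feature("chars_added", lambda: 120, returns=int, depends_on=[])
#   chars_removed = Feature("chars_removed", lambda: 45, returns=int, depends_on=[])
#   net_chars = chars_added - chars_removed   # a `sub` feature named "(chars_added - chars_removed)"
#   grew = chars_added > chars_removed        # a `gt` feature, returns bool
#   ratio = chars_added / chars_removed       # a `div` feature, always returns float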
class max(Modifier):
"""
Generates a feature that represents the maximum of a set of
:class:`revscoring.Feature` or constant values.
"""
def __init__(self, *args):
dependencies = [Feature.or_constant(arg) for arg in args]
returns = float
# Hardcoded even though max can return strings, it
# shouldn't ever do that
name = "max({0})".format(", ".join(f.name for f in dependencies))
super().__init__(name, self._process, returns=returns,
depends_on=dependencies)
def _process(self, *feature_values):
return float(math_max(*feature_values))
class min(Modifier):
"""
Generates a feature that represents the minimum of a set of
:class:`revscoring.Feature` or constant values.
"""
def __init__(self, *args):
dependencies = [Feature.or_constant(arg) for arg in args]
returns = float
        # Hardcoded even though min can return strings, it
        # shouldn't ever do that
name = "min({0})".format(", ".join(f.name for f in dependencies))
super().__init__(name, self._process, returns=returns,
depends_on=dependencies)
def _process(self, *feature_values):
return float(math_min(*feature_values))
class log(Modifier):
"""
Generates a feature that represents the log of a
:class:`revscoring.Feature`'s value.
"""
def __init__(self, feature):
feature = Feature.or_constant(feature)
super().__init__("log({0})".format(feature.name), self._process,
returns=float, depends_on=[feature])
def _process(self, feature_value):
return math_log(feature_value)
| mit | 48,726,279,513,784,540 | 23.632787 | 78 | 0.593238 | false |
y-lan/python-hiveserver2 | example/example.py | 1 | 4896 | import sys
from thrift.protocol.TBinaryProtocol import TBinaryProtocol
from thrift.transport.TSocket import TSocket
from thrift.transport.TTransport import TBufferedTransport
import sasl
from cloudera.thrift_sasl import TSaslClientTransport
from TCLIService import TCLIService
from TCLIService.ttypes import TOpenSessionReq, TGetTablesReq, TFetchResultsReq,\
TStatusCode, TGetResultSetMetadataReq, TGetColumnsReq, TType, TTypeId, \
TExecuteStatementReq, TGetOperationStatusReq, TFetchOrientation, TCloseOperationReq, \
TCloseSessionReq, TGetSchemasReq, TCancelOperationReq
## optional auth values:
## PLAIN: when 'hive.server2.authentication' is set as 'LDAP' or 'NONE'
## NOSASL: when 'hive.server2.authentication' is set as 'NOSASL'
auth = 'PLAIN' # PLAIN or NOSASL
username = ''
password = ''
host = 'localhost'
port = 10000
test_hql = 'select * from foo limit 10'
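## Illustrative alternative settings (not part of the original example): for a
## server configured with hive.server2.authentication=NOSASL you would use e.g.
##   auth = 'NOSASL'
##   host = 'hive.example.com'   # hypothetical hostname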
def get_type(typeDesc):
for ttype in typeDesc.types:
if ttype.primitiveEntry is not None:
return TTypeId._VALUES_TO_NAMES[ttype.primitiveEntry.type]
elif ttype.mapEntry is not None:
return ttype.mapEntry
elif ttype.unionEntry is not None:
return ttype.unionEntry
elif ttype.arrayEntry is not None:
return ttype.arrayEntry
elif ttype.structEntry is not None:
return ttype.structEntry
elif ttype.userDefinedTypeEntry is not None:
return ttype.userDefinedTypeEntry
def get_value(colValue):
if colValue.boolVal is not None:
return colValue.boolVal.value
elif colValue.byteVal is not None:
return colValue.byteVal.value
elif colValue.i16Val is not None:
return colValue.i16Val.value
elif colValue.i32Val is not None:
return colValue.i32Val.value
elif colValue.i64Val is not None:
return colValue.i64Val.value
elif colValue.doubleVal is not None:
return colValue.doubleVal.value
elif colValue.stringVal is not None:
return colValue.stringVal.value
# for SASL connection
def sasl_factory():
saslc = sasl.Client()
saslc.setAttr("username", username)
saslc.setAttr("password", password)
saslc.init()
return saslc
try:
print "1) Preparing the connection..."
sock = TSocket(host, port)
if auth == 'NOSASL':
transport = TBufferedTransport(sock)
else:
transport = TSaslClientTransport(sasl_factory, "PLAIN", sock)
client = TCLIService.Client(TBinaryProtocol(transport))
transport.open()
print "\n2) Opening Session..."
res = client.OpenSession(TOpenSessionReq(username=username, password=password))
session = res.sessionHandle
print('Session opened. ( %s )' % session.sessionId)
## 3) Show tables
print "\n3) Try fetching table list..."
query = TExecuteStatementReq(session, statement="show tables", confOverlay={})
response = client.ExecuteStatement(query)
opHandle = response.operationHandle
fetchReq = TFetchResultsReq(operationHandle=opHandle, orientation=TFetchOrientation.FETCH_NEXT, maxRows=100);
resultsRes = client.FetchResults(fetchReq);
## close operation && release lock
req = TCloseOperationReq(operationHandle=opHandle)
client.CloseOperation(req)
print('-'*32)
for row in resultsRes.results.rows:
print row.colVals[0].stringVal.value
print('-'*32)
# 4) try execute HQL
print "\n4) Executing Test HQL: %s..." % test_hql
query = TExecuteStatementReq(session, statement=test_hql, confOverlay={})
response = client.ExecuteStatement(query)
opHandle = response.operationHandle
print('-'*32)
meta = []
if opHandle.hasResultSet:
metaReq = TGetResultSetMetadataReq(operationHandle=opHandle)
schema = client.GetResultSetMetadata(metaReq).schema
for i, col in enumerate(schema.columns):
type = get_type(col.typeDesc)
name = col.columnName
meta.append(type)
if i == 0:
print name,
else:
print ', ' + name,
print
print('-'*32)
fetchReq = TFetchResultsReq(operationHandle=opHandle, orientation=TFetchOrientation.FETCH_NEXT, maxRows=100);
resultsRes = client.FetchResults(fetchReq);
for row in resultsRes.results.rows:
for i, col in enumerate(row.colVals):
if i == 0:
print get_value(col),
else:
print ', ' + str(get_value(col)),
print
print('-'*32)
## important !!
## don't forget to close operation & session after query
    ## or you may cause a resource leak on the server side
req = TCloseOperationReq(operationHandle=opHandle)
client.CloseOperation(req)
print "\n# 5) Closing Session..."
req = TCloseSessionReq(sessionHandle=session)
client.CloseSession(req)
print("Bye")
except Exception, e:
print e
| apache-2.0 | 841,611,044,743,546,000 | 33 | 113 | 0.685253 | false |
ikoveshnikov/tempesta | tempesta_fw/t/functional/sched/test_http.py | 1 | 8359 | """
Test for the http scheduler:
"""
from __future__ import print_function
import asyncore
from helpers import tempesta, deproxy, tf_cfg, chains
from testers import functional
class HttpRules(functional.FunctionalTest):
"""All requests must be forwarded to the right server groups according to
sched_http_rules.
"""
requests_n = 20
config = (
'cache 0;\n'
'\n'
'sched_http_rules {\n'
' match uri_p uri prefix "/static";\n'
' match uri_s uri suffix ".php";\n'
' match host_p host prefix "static.";\n'
' match host_s host suffix "tempesta-tech.com";\n'
' match host_e host eq "foo.example.com";\n'
' match hdr_h_p hdr_host prefix "bar.";\n'
' match hdr_h_e hdr_host eq "buzz.natsys-lab.com";\n'
' match hdr_h_s hdr_host suffix "natsys-lab.com";\n'
' match hdr_r_e hdr_ref eq "example.com";\n'
' match hdr_r_s hdr_ref suffix ".com";\n'
' match hdr_r_p hdr_ref prefix "http://example.com";\n'
'}\n'
'\n')
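    # For reference (inferred from the rules above rather than taken from the
    # Tempesta documentation): each sched_http_rules entry has the form
    #   match <server_group> <field> <op> <argument>
    # where <field> is uri/host/hdr_host/hdr_ref and <op> is eq/prefix/suffix,
    # so e.g. requests whose URI starts with "/static" go to the 'uri_p' group.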
def make_chains(self, uri, extra_header=(None, None)):
chain = chains.base(uri=uri)
header, value = extra_header
if not header is None:
for req in [chain.request, chain.fwd_request]:
req.headers.delete_all(header)
req.headers.add(header, value)
req.update()
return [chain for _ in range(self.requests_n)]
def create_client(self):
# Client will be created for every server.
for server in self.servers:
server.client = deproxy.Client()
def create_servers(self):
port = tempesta.upstream_port_start_from()
server_options = [
(('uri_p'), ('/static/index.html'), None, None),
(('uri_s'), ('/script.php'), None, None),
(('host_p'), ('/'), ('host'), ('static.example.com')),
(('host_s'), ('/'), ('host'), ('s.tempesta-tech.com')),
(('host_e'), ('/'), ('host'), ('foo.example.com')),
(('hdr_h_p'), ('/'), ('host'), ('bar.example.com')),
(('hdr_h_s'), ('/'), ('host'), ('test.natsys-lab.com')),
(('hdr_h_e'), ('/'), ('host'), ('buzz.natsys-lab.com')),
(('hdr_r_e'), ('/'), ('referer'), ('example.com')),
(('hdr_r_s'), ('/'), ('referer'), ('http://example.com')),
(('hdr_r_p'), ('/'), ('referer'),
('http://example.com/cgi-bin/show.pl')),
(('default'), ('/'), None, None)]
for group, uri, header, value in server_options:
            # Don't need too many connections here.
server = deproxy.Server(port=port, conns_n=1)
port += 1
server.group = group
server.chains = self.make_chains(uri=uri,
extra_header=(header, value))
self.servers.append(server)
def configure_tempesta(self):
""" Add every server to it's own server group with default scheduler.
"""
for s in self.servers:
sg = tempesta.ServerGroup(s.group)
sg.add_server(s.ip, s.port, s.conns_n)
self.tempesta.config.add_sg(sg)
def create_tester(self):
self.testers = []
for server in self.servers:
tester = HttpSchedTester(server.client, [server])
tester.response_cb = self.response_recieved
tester.message_chains = server.chains
self.testers.append(tester)
def routine(self):
for i in range(self.requests_n):
self.responses_recieved = 0
for tester in self.testers:
tester.configure(i)
# Run asyncore loop with default timeout
self.testers[0].loop()
for tester in self.testers:
tester.check_expectations()
def init(self):
self.tempesta.config.set_defconfig(self.config)
self.configure_tempesta()
for server in self.servers:
server.start()
self.tempesta.start()
for server in self.servers:
server.client.start()
for tester in self.testers:
tester.start()
def test_scheduler(self):
self.init()
self.routine()
self.tempesta.get_stats()
self.assert_tempesta()
def response_recieved(self):
self.responses_recieved += 1
if self.responses_recieved == len(self.servers):
raise asyncore.ExitNow
def setUp(self):
self.testers = []
functional.FunctionalTest.setUp(self)
def tearDown(self):
if self.tempesta:
self.tempesta.stop()
for tester in self.testers:
tester.stop()
for server in self.servers:
server.client.stop("Deproxy client")
for server in self.servers:
server.stop("Deproxy server")
class HttpRulesBackupServers(HttpRules):
config = (
'cache 0;\n'
'\n'
'sched_http_rules {\n'
' match primary * * * backup=backup;\n'
'}\n'
'\n')
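    # The single wildcard rule above sends every request to the 'primary'
    # group; the backup= option is assumed (by this test) to make Tempesta
    # fall back to the 'backup' group once 'primary' has no live connections.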
def make_chains(self, empty=True):
chain = None
if empty:
chain = deproxy.MessageChain.empty()
else:
chain = chains.base()
return [chain for _ in range(self.requests_n)]
def create_server_helper(self, group, port):
server = deproxy.Server(port=port, conns_n=1)
server.group = group
server.chains = self.make_chains()
return server
def create_servers(self):
port = tempesta.upstream_port_start_from()
self.main_server = self.create_server_helper('primary', port)
self.backup_server = self.create_server_helper('backup', port + 1)
self.servers.append(self.main_server)
self.servers.append(self.backup_server)
def test_scheduler(self):
self.init()
        # Main server is online, the backup server must not receive traffic.
self.main_server.tester.message_chains = (
self.make_chains(empty=False))
self.backup_server.tester.message_chains = (
self.make_chains(empty=True))
self.routine()
# Shutdown main server, responses must be forwarded to backup.
self.main_server.tester.client.stop()
self.main_server.stop()
self.main_server.tester.message_chains = (
self.make_chains(empty=True))
self.backup_server.tester.message_chains = (
self.make_chains(empty=False))
self.routine()
        # Bring the main server back online.
self.testers.remove(self.main_server.tester)
self.main_server = self.create_server_helper(
group=self.main_server.group, port=self.main_server.port)
tester = HttpSchedTester(deproxy.Client(), [self.main_server])
tester.response_cb = self.response_recieved
self.testers.append(tester)
self.main_server.tester.message_chains = (
self.make_chains(empty=False))
self.backup_server.tester.message_chains = (
self.make_chains(empty=True))
self.main_server.start()
self.main_server.tester.client.start()
self.routine()
# Check tempesta for no errors
self.tempesta.get_stats()
self.assert_tempesta()
def response_recieved(self):
self.responses_recieved += 1
if self.responses_recieved == 1:
raise asyncore.ExitNow
class HttpSchedTester(deproxy.Deproxy):
def __init__(self, *args, **kwargs):
deproxy.Deproxy.__init__(self, *args, **kwargs)
def configure(self, chain_n):
if chain_n in range(len(self.message_chains)):
self.current_chain = self.message_chains[chain_n]
else:
self.current_chain = deproxy.MessageChain.empty()
self.recieved_chain = deproxy.MessageChain.empty()
self.client.clear()
self.client.set_request(self.current_chain)
def recieved_response(self, response):
        # A lot of clients are running, so don't raise asyncore.ExitNow directly;
        # instead call the response callback.
self.recieved_chain.response = response
self.response_cb()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| gpl-2.0 | 6,169,472,439,387,752,000 | 33.258197 | 77 | 0.564063 | false |
tdtrask/ansible | lib/ansible/modules/system/user.py | 1 | 76201 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Stephen Fromm <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: user
author:
- Stephen Fromm (@sfromm)
version_added: "0.2"
short_description: Manage user accounts
notes:
- There are specific requirements per platform on user management utilities. However
they generally come pre-installed with the system and Ansible will require they
are present at runtime. If they are not, a descriptive error message will be shown.
- For Windows targets, use the M(win_user) module instead.
description:
- Manage user accounts and user attributes.
- For Windows targets, use the M(win_user) module instead.
options:
name:
description:
- Name of the user to create, remove or modify.
required: true
aliases: [ user ]
comment:
description:
- Optionally sets the description (aka I(GECOS)) of user account.
uid:
description:
- Optionally sets the I(UID) of the user.
non_unique:
description:
            - Optionally when used with the -u option, this option allows changing
              the user ID to a non-unique value.
type: bool
default: "no"
version_added: "1.1"
seuser:
description:
- Optionally sets the seuser type (user_u) on selinux enabled systems.
version_added: "2.1"
group:
description:
- Optionally sets the user's primary group (takes a group name).
groups:
description:
- Puts the user in list of groups. When set to the empty string ('groups='),
the user is removed from all groups except the primary group.
- Before version 2.3, the only input format allowed was a 'comma separated string',
now it should be able to accept YAML lists also.
append:
description:
- If C(yes), will only add groups, not set them to just the list
in I(groups).
type: bool
default: "no"
shell:
description:
- Optionally set the user's shell.
- On Mac OS X, before version 2.5, the default shell for non-system users was
/usr/bin/false. Since 2.5, the default shell for non-system users on
Mac OS X is /bin/bash.
home:
description:
- Optionally set the user's home directory.
skeleton:
description:
- Optionally set a home skeleton directory. Requires create_home option!
version_added: "2.0"
password:
description:
- Optionally set the user's password to this crypted value. See
the user example in the github examples directory for what this looks
like in a playbook. See U(http://docs.ansible.com/ansible/faq.html#how-do-i-generate-crypted-passwords-for-the-user-module)
for details on various ways to generate these password values.
              Note that on Darwin systems, this value has to be cleartext.
Beware of security issues.
state:
description:
- Whether the account should exist or not, taking action if the state is different from what is stated.
choices: [ absent, present ]
default: present
create_home:
description:
- Unless set to C(no), a home directory will be made for the user
when the account is created or if the home directory does not
exist.
- Changed from C(createhome) to C(create_home) in version 2.5.
type: bool
default: 'yes'
aliases: ['createhome']
move_home:
description:
- If set to C(yes) when used with C(home=), attempt to move the
user's home directory to the specified directory if it isn't there
already.
type: bool
default: "no"
system:
description:
- When creating an account, setting this to C(yes) makes the user a
system account. This setting cannot be changed on existing users.
type: bool
default: "no"
force:
description:
- When used with C(state=absent), behavior is as with C(userdel --force).
type: bool
default: "no"
login_class:
description:
- Optionally sets the user's login class for FreeBSD, OpenBSD and NetBSD systems.
remove:
description:
- When used with C(state=absent), behavior is as with C(userdel --remove).
type: bool
default: "no"
generate_ssh_key:
description:
- Whether to generate a SSH key for the user in question.
This will B(not) overwrite an existing SSH key.
type: bool
default: "no"
version_added: "0.9"
ssh_key_bits:
description:
- Optionally specify number of bits in SSH key to create.
default: default set by ssh-keygen
version_added: "0.9"
ssh_key_type:
description:
- Optionally specify the type of SSH key to generate.
Available SSH key types will depend on implementation
present on target host.
default: rsa
version_added: "0.9"
ssh_key_file:
description:
- Optionally specify the SSH key filename. If this is a relative
filename then it will be relative to the user's home directory.
default: .ssh/id_rsa
version_added: "0.9"
ssh_key_comment:
description:
- Optionally define the comment for the SSH key.
default: ansible-generated on $HOSTNAME
version_added: "0.9"
ssh_key_passphrase:
description:
- Set a passphrase for the SSH key. If no
passphrase is provided, the SSH key will default to
having no passphrase.
version_added: "0.9"
update_password:
description:
- C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
choices: [ always, on_create ]
default: always
version_added: "1.3"
expires:
description:
- An expiry time for the user in epoch, it will be ignored on platforms that do not support this.
Currently supported on Linux and FreeBSD.
version_added: "1.9"
local:
description:
- Forces the use of "local" command alternatives on platforms that implement it.
              This is useful in environments that use centralized authentication when you want to manipulate the local users.
              I.e. it uses `luseradd` instead of `useradd`.
- This requires that these commands exist on the targeted host, otherwise it will be a fatal error.
type: bool
default: 'no'
version_added: "2.4"
'''
EXAMPLES = '''
- name: Add the user 'johnd' with a specific uid and a primary group of 'admin'
user:
name: johnd
comment: John Doe
uid: 1040
group: admin
- name: Add the user 'james' with a bash shell, appending the group 'admins' and 'developers' to the user's groups
user:
name: james
shell: /bin/bash
groups: admins,developers
append: yes
- name: Remove the user 'johnd'
user:
name: johnd
state: absent
remove: yes
- name: Create a 2048-bit SSH key for user jsmith in ~jsmith/.ssh/id_rsa
user:
name: jsmith
generate_ssh_key: yes
ssh_key_bits: 2048
ssh_key_file: .ssh/id_rsa
- name: Added a consultant whose account you want to expire
user:
name: james18
shell: /bin/zsh
groups: developers
expires: 1422403387
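# Illustrative extra example (not part of the original module documentation):
# an empty groups list removes the user from all secondary groups
- name: Remove james18 from all groups except his primary group
  user:
    name: james18
    groups: ''
    append: no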
'''
import grp
import os
import platform
import pwd
import shutil
import socket
import time
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import load_platform_subclass, AnsibleModule
try:
import spwd
HAVE_SPWD = True
except:
HAVE_SPWD = False
class User(object):
"""
This is a generic User manipulation class that is subclassed
based on platform.
A subclass may wish to override the following action methods:-
- create_user()
- remove_user()
- modify_user()
- ssh_key_gen()
- ssh_key_fingerprint()
- user_exists()
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
SHADOWFILE = '/etc/shadow'
DATE_FORMAT = '%Y-%m-%d'
def __new__(cls, *args, **kwargs):
return load_platform_subclass(User, args, kwargs)
def __init__(self, module):
self.module = module
self.state = module.params['state']
self.name = module.params['name']
self.uid = module.params['uid']
self.non_unique = module.params['non_unique']
self.seuser = module.params['seuser']
self.group = module.params['group']
self.comment = module.params['comment']
self.shell = module.params['shell']
self.password = module.params['password']
self.force = module.params['force']
self.remove = module.params['remove']
self.create_home = module.params['create_home']
self.move_home = module.params['move_home']
self.skeleton = module.params['skeleton']
self.system = module.params['system']
self.login_class = module.params['login_class']
self.append = module.params['append']
self.sshkeygen = module.params['generate_ssh_key']
self.ssh_bits = module.params['ssh_key_bits']
self.ssh_type = module.params['ssh_key_type']
self.ssh_comment = module.params['ssh_key_comment']
self.ssh_passphrase = module.params['ssh_key_passphrase']
self.update_password = module.params['update_password']
self.home = module.params['home']
self.expires = None
self.groups = None
self.local = module.params['local']
if module.params['groups'] is not None:
self.groups = ','.join(module.params['groups'])
if module.params['expires']:
try:
self.expires = time.gmtime(module.params['expires'])
except Exception as e:
module.fail_json(msg="Invalid expires time %s: %s" % (self.expires, to_native(e)))
if module.params['ssh_key_file'] is not None:
self.ssh_file = module.params['ssh_key_file']
else:
self.ssh_file = os.path.join('.ssh', 'id_%s' % self.ssh_type)
def execute_command(self, cmd, use_unsafe_shell=False, data=None, obey_checkmode=True):
if self.module.check_mode and obey_checkmode:
self.module.debug('In check mode, would have run: "%s"' % cmd)
return (0, '', '')
else:
            # cast all args to strings (see ansible-modules-core issue #4397)
cmd = [str(x) for x in cmd]
return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
def remove_user_userdel(self):
if self.local:
command_name = 'luserdel'
else:
command_name = 'userdel'
cmd = [self.module.get_bin_path(command_name, True)]
if self.force:
cmd.append('-f')
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def create_user_useradd(self):
if self.local:
command_name = 'luseradd'
else:
command_name = 'useradd'
cmd = [self.module.get_bin_path(command_name, True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.seuser is not None:
cmd.append('-Z')
cmd.append(self.seuser)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
elif self.group_exists(self.name):
# use the -N option (no user group) if a group already
# exists with the same name as the user to prevent
# errors from useradd trying to create a group when
# USERGROUPS_ENAB is set in /etc/login.defs.
if os.path.exists('/etc/redhat-release'):
dist = platform.dist()
major_release = int(dist[1].split('.')[0])
if major_release <= 5:
cmd.append('-n')
else:
cmd.append('-N')
elif os.path.exists('/etc/SuSE-release'):
# -N did not exist in useradd before SLE 11 and did not
# automatically create a group
dist = platform.dist()
major_release = int(dist[1].split('.')[0])
if major_release >= 12:
cmd.append('-N')
else:
cmd.append('-N')
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.expires:
cmd.append('-e')
cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
if self.password is not None:
cmd.append('-p')
cmd.append(self.password)
if self.create_home:
if not self.local:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
else:
cmd.append('-M')
if self.system:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def _check_usermod_append(self):
# check if this version of usermod can append groups
if self.local:
command_name = 'lusermod'
else:
command_name = 'usermod'
usermod_path = self.module.get_bin_path(command_name, True)
# for some reason, usermod --help cannot be used by non root
# on RH/Fedora, due to lack of execute bit for others
if not os.access(usermod_path, os.X_OK):
return False
cmd = [usermod_path, '--help']
(rc, data1, data2) = self.execute_command(cmd, obey_checkmode=False)
helpout = data1 + data2
# check if --append exists
lines = to_native(helpout).split('\n')
for line in lines:
if line.strip().startswith('-a, --append'):
return True
return False
def modify_user_usermod(self):
if self.local:
command_name = 'lusermod'
else:
command_name = 'usermod'
cmd = [self.module.get_bin_path(command_name, True)]
info = self.user_info()
has_append = self._check_usermod_append()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
# get a list of all groups for the user, including the primary
current_groups = self.user_group_membership(exclude_primary=False)
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set(remove_existing=False)
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
if has_append:
cmd.append('-a')
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
if self.append and not has_append:
cmd.append('-A')
cmd.append(','.join(group_diff))
else:
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
cmd.append('-d')
cmd.append(self.home)
if self.move_home:
cmd.append('-m')
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.expires:
cmd.append('-e')
cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
def group_exists(self, group):
try:
# Try group as a gid first
grp.getgrgid(int(group))
return True
except (ValueError, KeyError):
try:
grp.getgrnam(group)
return True
except KeyError:
return False
def group_info(self, group):
if not self.group_exists(group):
return False
try:
# Try group as a gid first
return list(grp.getgrgid(int(group)))
except (ValueError, KeyError):
return list(grp.getgrnam(group))
def get_groups_set(self, remove_existing=True):
if self.groups is None:
return None
info = self.user_info()
groups = set(x.strip() for x in self.groups.split(',') if x)
for g in groups.copy():
if not self.group_exists(g):
self.module.fail_json(msg="Group %s does not exist" % (g))
if info and remove_existing and self.group_info(g)[2] == info[3]:
groups.remove(g)
return groups
def user_group_membership(self, exclude_primary=True):
''' Return a list of groups the user belongs to '''
groups = []
info = self.get_pwd_info()
for group in grp.getgrall():
if self.name in group.gr_mem:
# Exclude the user's primary group by default
if not exclude_primary:
groups.append(group[0])
else:
if info[3] != group.gr_gid:
groups.append(group[0])
return groups
def user_exists(self):
try:
if pwd.getpwnam(self.name):
return True
except KeyError:
return False
def get_pwd_info(self):
if not self.user_exists():
return False
return list(pwd.getpwnam(self.name))
def user_info(self):
if not self.user_exists():
return False
info = self.get_pwd_info()
if len(info[1]) == 1 or len(info[1]) == 0:
info[1] = self.user_password()
return info
def user_password(self):
passwd = ''
if HAVE_SPWD:
try:
passwd = spwd.getspnam(self.name)[1]
except KeyError:
return passwd
if not self.user_exists():
return passwd
elif self.SHADOWFILE:
# Read shadow file for user's encrypted password string
if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
for line in open(self.SHADOWFILE).readlines():
if line.startswith('%s:' % self.name):
passwd = line.split(':')[1]
return passwd
def get_ssh_key_path(self):
info = self.user_info()
if os.path.isabs(self.ssh_file):
ssh_key_file = self.ssh_file
else:
ssh_key_file = os.path.join(info[5], self.ssh_file)
return ssh_key_file
def ssh_key_gen(self):
info = self.user_info()
if not os.path.exists(info[5]) and not self.module.check_mode:
return (1, '', 'User %s home directory does not exist' % self.name)
ssh_key_file = self.get_ssh_key_path()
ssh_dir = os.path.dirname(ssh_key_file)
if not os.path.exists(ssh_dir):
if self.module.check_mode:
return (0, '', '')
try:
os.mkdir(ssh_dir, int('0700', 8))
os.chown(ssh_dir, info[2], info[3])
except OSError as e:
return (1, '', 'Failed to create %s: %s' % (ssh_dir, to_native(e)))
if os.path.exists(ssh_key_file):
return (None, 'Key already exists', '')
cmd = [self.module.get_bin_path('ssh-keygen', True)]
cmd.append('-t')
cmd.append(self.ssh_type)
if self.ssh_bits > 0:
cmd.append('-b')
cmd.append(self.ssh_bits)
cmd.append('-C')
cmd.append(self.ssh_comment)
cmd.append('-f')
cmd.append(ssh_key_file)
cmd.append('-N')
if self.ssh_passphrase is not None:
cmd.append(self.ssh_passphrase)
else:
cmd.append('')
(rc, out, err) = self.execute_command(cmd)
if rc == 0 and not self.module.check_mode:
# If the keys were successfully created, we should be able
# to tweak ownership.
os.chown(ssh_key_file, info[2], info[3])
os.chown('%s.pub' % ssh_key_file, info[2], info[3])
return (rc, out, err)
def ssh_key_fingerprint(self):
ssh_key_file = self.get_ssh_key_path()
if not os.path.exists(ssh_key_file):
return (1, 'SSH Key file %s does not exist' % ssh_key_file, '')
cmd = [self.module.get_bin_path('ssh-keygen', True)]
cmd.append('-l')
cmd.append('-f')
cmd.append(ssh_key_file)
return self.execute_command(cmd, obey_checkmode=False)
def get_ssh_public_key(self):
ssh_public_key_file = '%s.pub' % self.get_ssh_key_path()
try:
f = open(ssh_public_key_file)
ssh_public_key = f.read().strip()
f.close()
except IOError:
return None
return ssh_public_key
def create_user(self):
# by default we use the create_user_useradd method
return self.create_user_useradd()
def remove_user(self):
# by default we use the remove_user_userdel method
return self.remove_user_userdel()
def modify_user(self):
# by default we use the modify_user_usermod method
return self.modify_user_usermod()
def create_homedir(self, path):
if not os.path.exists(path):
if self.skeleton is not None:
skeleton = self.skeleton
else:
skeleton = '/etc/skel'
if os.path.exists(skeleton):
try:
shutil.copytree(skeleton, path, symlinks=True)
except OSError as e:
self.module.exit_json(failed=True, msg="%s" % to_native(e))
else:
try:
os.makedirs(path)
except OSError as e:
self.module.exit_json(failed=True, msg="%s" % to_native(e))
def chown_homedir(self, uid, gid, path):
try:
os.chown(path, uid, gid)
for root, dirs, files in os.walk(path):
for d in dirs:
os.chown(os.path.join(root, d), uid, gid)
for f in files:
os.chown(os.path.join(root, f), uid, gid)
except OSError as e:
self.module.exit_json(failed=True, msg="%s" % to_native(e))
# ===========================================
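# Illustrative sketch (not part of the module): support for a new platform is
# added by subclassing User, setting `platform` (and, where relevant,
# `distribution`) to the values load_platform_subclass() matches against, and
# overriding the action methods listed in the User docstring, e.g.:
#
#   class HypotheticalOSUser(User):
#       platform = 'HypotheticalOS'   # assumption: not a real platform name
#       distribution = None
#       SHADOWFILE = '/etc/shadow'
#
#       def create_user(self):
#           cmd = [self.module.get_bin_path('useradd', True), self.name]
#           return self.execute_command(cmd)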
class FreeBsdUser(User):
"""
This is a FreeBSD User manipulation class - it uses the pw command
to manipulate the user database, followed by the chpass command
to change the password.
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'FreeBSD'
distribution = None
SHADOWFILE = '/etc/master.passwd'
def remove_user(self):
cmd = [
self.module.get_bin_path('pw', True),
'userdel',
'-n',
self.name
]
if self.remove:
cmd.append('-r')
return self.execute_command(cmd)
def create_user(self):
cmd = [
self.module.get_bin_path('pw', True),
'useradd',
'-n',
self.name,
]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.expires:
days = (time.mktime(self.expires) - time.time()) // 86400
cmd.append('-e')
cmd.append(str(int(days)))
        # system cannot be handled currently - should we error if it's requested?
# create the user
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
# we have to set the password in a second command
if self.password is not None:
cmd = [
self.module.get_bin_path('chpass', True),
'-p',
self.password,
self.name
]
return self.execute_command(cmd)
return (rc, out, err)
def modify_user(self):
cmd = [
self.module.get_bin_path('pw', True),
'usermod',
'-n',
self.name
]
cmd_len = len(cmd)
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
# find current login class
user_login_class = None
if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
for line in open(self.SHADOWFILE).readlines():
if line.startswith('%s:' % self.name):
user_login_class = line.split(':')[4]
# act only if login_class change
if self.login_class != user_login_class:
cmd.append('-L')
cmd.append(self.login_class)
if self.groups is not None:
current_groups = self.user_group_membership()
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
groups_need_mod = False
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
new_groups = groups
if self.append:
new_groups = groups | set(current_groups)
cmd.append(','.join(new_groups))
if self.expires:
days = (time.mktime(self.expires) - time.time()) // 86400
cmd.append('-e')
cmd.append(str(int(days)))
# modify the user if cmd will do anything
if cmd_len != len(cmd):
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
else:
(rc, out, err) = (None, '', '')
# we have to set the password in a second command
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd = [
self.module.get_bin_path('chpass', True),
'-p',
self.password,
self.name
]
return self.execute_command(cmd)
return (rc, out, err)
class OpenBSDUser(User):
"""
This is a OpenBSD User manipulation class.
Main differences are that OpenBSD:-
- has no concept of "system" account.
- has no force delete user
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'OpenBSD'
distribution = None
SHADOWFILE = '/etc/master.passwd'
def create_user(self):
cmd = [self.module.get_bin_path('useradd', True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.password is not None and self.password != '*':
cmd.append('-p')
cmd.append(self.password)
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
cmd.append(self.name)
return self.execute_command(cmd)
def remove_user_userdel(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def modify_user(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups_option = '-S'
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_option = '-G'
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append(groups_option)
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
# find current login class
user_login_class = None
userinfo_cmd = [self.module.get_bin_path('userinfo', True), self.name]
(rc, out, err) = self.execute_command(userinfo_cmd, obey_checkmode=False)
for line in out.splitlines():
tokens = line.split()
if tokens[0] == 'class' and len(tokens) == 2:
user_login_class = tokens[1]
# act only if login_class change
if self.login_class != user_login_class:
cmd.append('-L')
cmd.append(self.login_class)
if self.update_password == 'always' and self.password is not None \
and self.password != '*' and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
class NetBSDUser(User):
"""
This is a NetBSD User manipulation class.
Main differences are that NetBSD:-
- has no concept of "system" account.
- has no force delete user
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'NetBSD'
distribution = None
SHADOWFILE = '/etc/master.passwd'
def create_user(self):
cmd = [self.module.get_bin_path('useradd', True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
if len(groups) > 16:
self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.password is not None:
cmd.append('-p')
cmd.append(self.password)
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
cmd.append(self.name)
return self.execute_command(cmd)
def remove_user_userdel(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def modify_user(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups = set(current_groups).union(groups)
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
if len(groups) > 16:
self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
class SunOS(User):
"""
This is a SunOS User manipulation class - The main difference between
this class and the generic user class is that Solaris-type distros
don't support the concept of a "system" account and we need to
edit the /etc/shadow file manually to set a password. (Ugh)
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'SunOS'
distribution = None
SHADOWFILE = '/etc/shadow'
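    # A shadow(4) entry is a colon-separated line; the password handling below
    # rewrites field 1 (password hash), field 2 (last change, in days since the
    # epoch) and fields 3-5 (min/max/warn, in days). Illustrative entry, not
    # taken from any real system:
    #   jsmith:$5$salt$hash:17000:7:91:14:::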
def get_password_defaults(self):
# Read password aging defaults
try:
minweeks = ''
maxweeks = ''
warnweeks = ''
for line in open("/etc/default/passwd", 'r'):
line = line.strip()
if (line.startswith('#') or line == ''):
continue
key, value = line.split('=')
if key == "MINWEEKS":
minweeks = value.rstrip('\n')
elif key == "MAXWEEKS":
maxweeks = value.rstrip('\n')
elif key == "WARNWEEKS":
warnweeks = value.rstrip('\n')
except Exception as err:
self.module.fail_json(msg="failed to read /etc/default/passwd: %s" % to_native(err))
return (minweeks, maxweeks, warnweeks)
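    # For reference, a typical /etc/default/passwd fragment parsed above looks
    # like this (illustrative values only):
    #   MINWEEKS=1
    #   MAXWEEKS=13
    #   WARNWEEKS=2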
def remove_user(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def create_user(self):
cmd = [self.module.get_bin_path('useradd', True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
if not self.module.check_mode:
# we have to set the password by editing the /etc/shadow file
if self.password is not None:
minweeks, maxweeks, warnweeks = self.get_password_defaults()
try:
lines = []
for line in open(self.SHADOWFILE, 'rb').readlines():
line = to_native(line, errors='surrogate_or_strict')
fields = line.strip().split(':')
if not fields[0] == self.name:
lines.append(line)
continue
fields[1] = self.password
fields[2] = str(int(time.time() // 86400))
if minweeks:
fields[3] = str(int(minweeks) * 7)
if maxweeks:
fields[4] = str(int(maxweeks) * 7)
if warnweeks:
fields[5] = str(int(warnweeks) * 7)
line = ':'.join(fields)
lines.append('%s\n' % line)
open(self.SHADOWFILE, 'w+').writelines(lines)
except Exception as err:
self.module.fail_json(msg="failed to update users password: %s" % to_native(err))
return (rc, out, err)
def modify_user_usermod(self):
cmd = [self.module.get_bin_path('usermod', True)]
cmd_len = len(cmd)
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
groups_need_mod = False
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
new_groups = groups
if self.append:
new_groups.update(current_groups)
cmd.append(','.join(new_groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
# modify the user if cmd will do anything
if cmd_len != len(cmd):
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
else:
(rc, out, err) = (None, '', '')
# we have to set the password by editing the /etc/shadow file
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
(rc, out, err) = (0, '', '')
if not self.module.check_mode:
minweeks, maxweeks, warnweeks = self.get_password_defaults()
try:
lines = []
for line in open(self.SHADOWFILE, 'rb').readlines():
line = to_native(line, errors='surrogate_or_strict')
fields = line.strip().split(':')
if not fields[0] == self.name:
lines.append(line)
continue
fields[1] = self.password
fields[2] = str(int(time.time() // 86400))
if minweeks:
fields[3] = str(int(minweeks) * 7)
if maxweeks:
fields[4] = str(int(maxweeks) * 7)
if warnweeks:
fields[5] = str(int(warnweeks) * 7)
line = ':'.join(fields)
lines.append('%s\n' % line)
open(self.SHADOWFILE, 'w+').writelines(lines)
rc = 0
except Exception as err:
self.module.fail_json(msg="failed to update users password: %s" % to_native(err))
return (rc, out, err)
class DarwinUser(User):
"""
This is a Darwin Mac OS X User manipulation class.
Main differences are that Darwin:-
- Handles accounts in a database managed by dscl(1)
- Has no useradd/groupadd
- Does not create home directories
- User password must be cleartext
- UID must be given
    - System users must be under 500
This overrides the following methods from the generic class:-
- user_exists()
- create_user()
- remove_user()
- modify_user()
"""
platform = 'Darwin'
distribution = None
SHADOWFILE = None
dscl_directory = '.'
fields = [
('comment', 'RealName'),
('home', 'NFSHomeDirectory'),
('shell', 'UserShell'),
('uid', 'UniqueID'),
('group', 'PrimaryGroupID'),
]
def _get_dscl(self):
return [self.module.get_bin_path('dscl', True), self.dscl_directory]
def _list_user_groups(self):
cmd = self._get_dscl()
cmd += ['-search', '/Groups', 'GroupMembership', self.name]
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
groups = []
for line in out.splitlines():
if line.startswith(' ') or line.startswith(')'):
continue
groups.append(line.split()[0])
return groups
def _get_user_property(self, property):
        '''Return user PROPERTY as given by dscl(1) read, or None if not found.'''
cmd = self._get_dscl()
cmd += ['-read', '/Users/%s' % self.name, property]
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
if rc != 0:
return None
# from dscl(1)
# if property contains embedded spaces, the list will instead be
# displayed one entry per line, starting on the line after the key.
lines = out.splitlines()
# sys.stderr.write('*** |%s| %s -> %s\n' % (property, out, lines))
if len(lines) == 1:
return lines[0].split(': ')[1]
else:
if len(lines) > 2:
return '\n'.join([lines[1].strip()] + lines[2:])
else:
if len(lines) == 2:
return lines[1].strip()
else:
return None
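    # Illustrative dscl(1) output handled by _get_user_property() above (exact
    # formatting can vary between macOS releases):
    #   UserShell: /bin/bash
    # or, for a value with embedded spaces, one entry per line after the key:
    #   RealName:
    #    John Q. Public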
def _get_next_uid(self, system=None):
'''
Return the next available uid. If system=True, then
        uid should be below 500, if possible.
'''
cmd = self._get_dscl()
cmd += ['-list', '/Users', 'UniqueID']
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
if rc != 0:
self.module.fail_json(
msg="Unable to get the next available uid",
rc=rc,
out=out,
err=err
)
max_uid = 0
max_system_uid = 0
for line in out.splitlines():
current_uid = int(line.split(' ')[-1])
if max_uid < current_uid:
max_uid = current_uid
if max_system_uid < current_uid and current_uid < 500:
max_system_uid = current_uid
if system and (0 < max_system_uid < 499):
return max_system_uid + 1
return max_uid + 1
def _change_user_password(self):
'''Change password for SELF.NAME against SELF.PASSWORD.
Please note that password must be cleartext.
'''
# some documentation on how is stored passwords on OSX:
# http://blog.lostpassword.com/2012/07/cracking-mac-os-x-lion-accounts-passwords/
# http://null-byte.wonderhowto.com/how-to/hack-mac-os-x-lion-passwords-0130036/
# http://pastebin.com/RYqxi7Ca
# on OSX 10.8+ hash is SALTED-SHA512-PBKDF2
# https://pythonhosted.org/passlib/lib/passlib.hash.pbkdf2_digest.html
# https://gist.github.com/nueh/8252572
cmd = self._get_dscl()
if self.password:
cmd += ['-passwd', '/Users/%s' % self.name, self.password]
else:
cmd += ['-create', '/Users/%s' % self.name, 'Password', '*']
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Error when changing password', err=err, out=out, rc=rc)
return (rc, out, err)
def _make_group_numerical(self):
        '''Convert SELF.GROUP to its numerical value, as a string suitable for dscl.'''
if self.group is None:
self.group = 'nogroup'
try:
self.group = grp.getgrnam(self.group).gr_gid
except KeyError:
self.module.fail_json(msg='Group "%s" not found. Try to create it first using "group" module.' % self.group)
# We need to pass a string to dscl
self.group = str(self.group)
def __modify_group(self, group, action):
'''Add or remove SELF.NAME to or from GROUP depending on ACTION.
ACTION can be 'add' or 'remove' otherwise 'remove' is assumed. '''
if action == 'add':
option = '-a'
else:
option = '-d'
cmd = ['dseditgroup', '-o', 'edit', option, self.name, '-t', 'user', group]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot %s user "%s" to group "%s".'
% (action, self.name, group), err=err, out=out, rc=rc)
return (rc, out, err)
def _modify_group(self):
        '''Synchronize SELF.NAME's group membership with SELF.GROUPS,
        adding missing groups and, unless append is set, removing extra ones. '''
rc = 0
out = ''
err = ''
changed = False
current = set(self._list_user_groups())
if self.groups is not None:
target = set(self.groups.split(','))
else:
target = set([])
if self.append is False:
for remove in current - target:
(_rc, _err, _out) = self.__modify_group(remove, 'delete')
                rc += _rc
out += _out
err += _err
changed = True
for add in target - current:
(_rc, _err, _out) = self.__modify_group(add, 'add')
rc += _rc
out += _out
err += _err
changed = True
return (rc, err, out, changed)
def _update_system_user(self):
        '''Hide or show user on login window according to SELF.SYSTEM.
Returns 0 if a change has been made, None otherwise.'''
plist_file = '/Library/Preferences/com.apple.loginwindow.plist'
# http://support.apple.com/kb/HT5017?viewlocale=en_US
cmd = ['defaults', 'read', plist_file, 'HiddenUsersList']
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
# returned value is
# (
# "_userA",
# "_UserB",
# userc
# )
hidden_users = []
for x in out.splitlines()[1:-1]:
try:
x = x.split('"')[1]
except IndexError:
x = x.strip()
hidden_users.append(x)
if self.system:
if self.name not in hidden_users:
cmd = ['defaults', 'write', plist_file, 'HiddenUsersList', '-array-add', self.name]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
                    self.module.fail_json(msg='Cannot add user "%s" to the hidden user list.' % self.name, err=err, out=out, rc=rc)
return 0
else:
if self.name in hidden_users:
del (hidden_users[hidden_users.index(self.name)])
cmd = ['defaults', 'write', plist_file, 'HiddenUsersList', '-array'] + hidden_users
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot remove user "%s" from hidden user list.' % self.name, err=err, out=out, rc=rc)
return 0
def user_exists(self):
        '''Check if SELF.NAME is a known user on the system.'''
cmd = self._get_dscl()
cmd += ['-list', '/Users/%s' % self.name]
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
return rc == 0
def remove_user(self):
'''Delete SELF.NAME. If SELF.FORCE is true, remove its home directory.'''
info = self.user_info()
cmd = self._get_dscl()
cmd += ['-delete', '/Users/%s' % self.name]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot delete user "%s".' % self.name, err=err, out=out, rc=rc)
if self.force:
if os.path.exists(info[5]):
shutil.rmtree(info[5])
out += "Removed %s" % info[5]
return (rc, out, err)
def create_user(self, command_name='dscl'):
cmd = self._get_dscl()
cmd += ['-create', '/Users/%s' % self.name]
(rc, err, out) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot create user "%s".' % self.name, err=err, out=out, rc=rc)
self._make_group_numerical()
if self.uid is None:
self.uid = str(self._get_next_uid(self.system))
# Homedir is not created by default
if self.create_home:
if self.home is None:
self.home = '/Users/%s' % self.name
if not self.module.check_mode:
if not os.path.exists(self.home):
os.makedirs(self.home)
self.chown_homedir(int(self.uid), int(self.group), self.home)
# dscl sets shell to /usr/bin/false when UserShell is not specified
# so set the shell to /bin/bash when the user is not a system user
if not self.system and self.shell is None:
self.shell = '/bin/bash'
for field in self.fields:
if field[0] in self.__dict__ and self.__dict__[field[0]]:
cmd = self._get_dscl()
cmd += ['-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]]
(rc, _err, _out) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot add property "%s" to user "%s".' % (field[0], self.name), err=err, out=out, rc=rc)
out += _out
err += _err
if rc != 0:
return (rc, _err, _out)
(rc, _err, _out) = self._change_user_password()
out += _out
err += _err
self._update_system_user()
# here we don't care about change status since it is a creation,
# thus changed is always true.
if self.groups:
(rc, _out, _err, changed) = self._modify_group()
out += _out
err += _err
return (rc, err, out)
def modify_user(self):
changed = None
out = ''
err = ''
if self.group:
self._make_group_numerical()
for field in self.fields:
if field[0] in self.__dict__ and self.__dict__[field[0]]:
current = self._get_user_property(field[1])
if current is None or current != self.__dict__[field[0]]:
cmd = self._get_dscl()
cmd += ['-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]]
(rc, _err, _out) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(
msg='Cannot update property "%s" for user "%s".'
% (field[0], self.name), err=err, out=out, rc=rc)
changed = rc
out += _out
err += _err
if self.update_password == 'always' and self.password is not None:
(rc, _err, _out) = self._change_user_password()
out += _out
err += _err
changed = rc
if self.groups:
(rc, _out, _err, _changed) = self._modify_group()
out += _out
err += _err
if _changed is True:
changed = rc
rc = self._update_system_user()
if rc == 0:
changed = rc
return (changed, out, err)
class AIX(User):
"""
    This is an AIX User manipulation class.
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'AIX'
distribution = None
SHADOWFILE = '/etc/security/passwd'
def remove_user(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def create_user_useradd(self, command_name='useradd'):
cmd = [self.module.get_bin_path(command_name, True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
# set password with chpasswd
if self.password is not None:
cmd = []
cmd.append(self.module.get_bin_path('chpasswd', True))
cmd.append('-e')
cmd.append('-c')
self.execute_command(cmd, data="%s:%s" % (self.name, self.password))
return (rc, out, err)
def modify_user_usermod(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
# skip if no changes to be made
if len(cmd) == 1:
(rc, out, err) = (None, '', '')
else:
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
# set password with chpasswd
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd = []
cmd.append(self.module.get_bin_path('chpasswd', True))
cmd.append('-e')
cmd.append('-c')
(rc2, out2, err2) = self.execute_command(cmd, data="%s:%s" % (self.name, self.password))
else:
(rc2, out2, err2) = (None, '', '')
if rc is not None:
return (rc, out + out2, err + err2)
else:
return (rc2, out + out2, err + err2)
class HPUX(User):
"""
    This is an HP-UX User manipulation class.
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'HP-UX'
distribution = None
SHADOWFILE = '/etc/shadow'
def create_user(self):
cmd = ['/usr/sam/lbin/useradd.sam']
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.password is not None:
cmd.append('-p')
cmd.append(self.password)
if self.create_home:
cmd.append('-m')
else:
cmd.append('-M')
if self.system:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def remove_user(self):
cmd = ['/usr/sam/lbin/userdel.sam']
if self.force:
cmd.append('-F')
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def modify_user(self):
cmd = ['/usr/sam/lbin/usermod.sam']
info = self.user_info()
has_append = self._check_usermod_append()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set(remove_existing=False)
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
new_groups = groups
if self.append:
new_groups = groups | set(current_groups)
cmd.append(','.join(new_groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
cmd.append('-d')
cmd.append(self.home)
if self.move_home:
cmd.append('-m')
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd.append('-F')
cmd.append('-p')
cmd.append(self.password)
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
def main():
ssh_defaults = dict(
bits=0,
type='rsa',
passphrase=None,
comment='ansible-generated on %s' % socket.gethostname()
)
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
name=dict(type='str', required=True, aliases=['user']),
uid=dict(type='str'),
non_unique=dict(type='bool', default=False),
group=dict(type='str'),
groups=dict(type='list'),
comment=dict(type='str'),
home=dict(type='path'),
shell=dict(type='str'),
password=dict(type='str', no_log=True),
login_class=dict(type='str'),
# following options are specific to selinux
seuser=dict(type='str'),
# following options are specific to userdel
force=dict(type='bool', default=False),
remove=dict(type='bool', default=False),
# following options are specific to useradd
create_home=dict(type='bool', default=True, aliases=['createhome']),
skeleton=dict(type='str'),
system=dict(type='bool', default=False),
# following options are specific to usermod
move_home=dict(type='bool', default=False),
append=dict(type='bool', default=False),
# following are specific to ssh key generation
generate_ssh_key=dict(type='bool'),
ssh_key_bits=dict(type='int', default=ssh_defaults['bits']),
ssh_key_type=dict(type='str', default=ssh_defaults['type']),
ssh_key_file=dict(type='path'),
ssh_key_comment=dict(type='str', default=ssh_defaults['comment']),
ssh_key_passphrase=dict(type='str', no_log=True),
update_password=dict(type='str', default='always', choices=['always', 'on_create']),
expires=dict(type='float'),
local=dict(type='bool'),
),
supports_check_mode=True
)
user = User(module)
module.debug('User instantiated - platform %s' % user.platform)
if user.distribution:
module.debug('User instantiated - distribution %s' % user.distribution)
rc = None
out = ''
err = ''
result = {}
result['name'] = user.name
result['state'] = user.state
if user.state == 'absent':
if user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = user.remove_user()
if rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
result['force'] = user.force
result['remove'] = user.remove
elif user.state == 'present':
if not user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = user.create_user()
if module.check_mode:
result['system'] = user.name
else:
result['system'] = user.system
result['create_home'] = user.create_home
else:
# modify user (note: this function is check mode aware)
(rc, out, err) = user.modify_user()
result['append'] = user.append
result['move_home'] = user.move_home
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if user.password is not None:
result['password'] = 'NOT_LOGGING_PASSWORD'
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
if user.user_exists():
info = user.user_info()
if info is False:
result['msg'] = "failed to look up user name: %s" % user.name
result['failed'] = True
result['uid'] = info[2]
result['group'] = info[3]
result['comment'] = info[4]
result['home'] = info[5]
result['shell'] = info[6]
if user.groups is not None:
result['groups'] = user.groups
# handle missing homedirs
info = user.user_info()
if user.home is None:
user.home = info[5]
if not os.path.exists(user.home) and user.create_home:
if not module.check_mode:
user.create_homedir(user.home)
user.chown_homedir(info[2], info[3], user.home)
result['changed'] = True
# deal with ssh key
if user.sshkeygen:
# generate ssh key (note: this function is check mode aware)
(rc, out, err) = user.ssh_key_gen()
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if rc == 0:
result['changed'] = True
(rc, out, err) = user.ssh_key_fingerprint()
if rc == 0:
result['ssh_fingerprint'] = out.strip()
else:
result['ssh_fingerprint'] = err.strip()
result['ssh_key_file'] = user.get_ssh_key_path()
result['ssh_public_key'] = user.get_ssh_public_key()
module.exit_json(**result)
# import module snippets
if __name__ == '__main__':
main()
| gpl-3.0 | 9,207,479,960,453,178,000 | 32.972804 | 137 | 0.518195 | false |
Niharika29/bugtracker | bugtracker/bugform/migrations/0001_initial.py | 1 | 3830 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'BugModel'
db.create_table(u'bugform_bugmodel', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('email', self.gf('django.db.models.fields.CharField')(max_length=200)),
('desc', self.gf('django.db.models.fields.CharField')(max_length=500)),
('date', self.gf('django.db.models.fields.DateField')()),
('loadtime', self.gf('django.db.models.fields.FloatField')()),
('os', self.gf('django.db.models.fields.CharField')(max_length=200)),
('browser', self.gf('django.db.models.fields.CharField')(max_length=200)),
('netspeed', self.gf('django.db.models.fields.FloatField')()),
('ip', self.gf('django.db.models.fields.CharField')(max_length=40)),
('city', self.gf('django.db.models.fields.CharField')(max_length=50)),
('country', self.gf('django.db.models.fields.CharField')(max_length=50)),
('timezone', self.gf('django.db.models.fields.CharField')(max_length=100)),
('bugstatus', self.gf('django.db.models.fields.CharField')(max_length=100)),
('bugpriority', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'bugform', ['BugModel'])
# Adding model 'AdminModel'
db.create_table(u'bugform_adminmodel', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('username', self.gf('django.db.models.fields.CharField')(max_length=30)),
('password', self.gf('django.db.models.fields.CharField')(max_length=20)),
))
db.send_create_signal(u'bugform', ['AdminModel'])
def backwards(self, orm):
# Deleting model 'BugModel'
db.delete_table(u'bugform_bugmodel')
# Deleting model 'AdminModel'
db.delete_table(u'bugform_adminmodel')
models = {
u'bugform.adminmodel': {
'Meta': {'object_name': 'AdminModel'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'bugform.bugmodel': {
'Meta': {'object_name': 'BugModel'},
'browser': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'bugpriority': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'bugstatus': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'date': ('django.db.models.fields.DateField', [], {}),
'desc': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'loadtime': ('django.db.models.fields.FloatField', [], {}),
'netspeed': ('django.db.models.fields.FloatField', [], {}),
'os': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'timezone': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['bugform'] | mit | -8,162,443,334,297,934,000 | 51.479452 | 92 | 0.574674 | false |
hip-odoo/odoo | addons/website_event_track/controllers/main.py | 5 | 6012 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import collections
import datetime
import pytz
from odoo import fields, http
from odoo.http import request
from odoo.tools import html_escape as escape, html2plaintext
class WebsiteEventTrackController(http.Controller):
@http.route(['''/event/<model("event.event"):event>/track/<model("event.track", "[('event_id','=',event[0])]"):track>'''], type='http', auth="public", website=True)
def event_track_view(self, event, track, **post):
track = track.sudo()
values = {'track': track, 'event': track.event_id, 'main_object': track}
return request.render("website_event_track.track_view", values)
def _prepare_calendar(self, event, event_track_ids):
local_tz = pytz.timezone(event.date_tz or 'UTC')
locations = {} # { location: [track, start_date, end_date, rowspan]}
dates = [] # [ (date, {}) ]
for track in event_track_ids:
locations.setdefault(track.location_id or False, [])
forcetr = True
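        # Build the agenda grid one table row at a time: tracks arrive sorted by date,
        # so a new row is opened whenever a track starts after the current row, has no
        # location, or follows an unlocated track; cells still running from earlier rows
        # get their rowspan extended, and empty filler cells pad locations with no talk.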
for track in event_track_ids:
start_date = fields.Datetime.from_string(track.date).replace(tzinfo=pytz.utc).astimezone(local_tz)
end_date = start_date + datetime.timedelta(hours=(track.duration or 0.5))
location = track.location_id or False
locations.setdefault(location, [])
# New TR, align all events
if forcetr or (start_date>dates[-1][0]) or not location:
dates.append((start_date, {}, bool(location)))
for loc in locations.keys():
if locations[loc] and (locations[loc][-1][2] > start_date):
locations[loc][-1][3] += 1
elif not locations[loc] or locations[loc][-1][2] <= start_date:
locations[loc].append([False, locations[loc] and locations[loc][-1][2] or dates[0][0], start_date, 1])
dates[-1][1][loc] = locations[loc][-1]
forcetr = not bool(location)
# Add event
if locations[location] and locations[location][-1][1] > start_date:
locations[location][-1][3] -= 1
locations[location].append([track, start_date, end_date, 1])
dates[-1][1][location] = locations[location][-1]
return {
'locations': locations,
'dates': dates
}
@http.route(['''/event/<model("event.event", "[('show_tracks','=',1)]"):event>/agenda'''], type='http', auth="public", website=True)
def event_agenda(self, event, tag=None, **post):
days_tracks = collections.defaultdict(lambda: [])
for track in event.track_ids.sorted(lambda track: (track.date, bool(track.location_id))):
if not track.date:
continue
days_tracks[track.date[:10]].append(track)
days = {}
days_tracks_count = {}
for day, tracks in days_tracks.iteritems():
days_tracks_count[day] = len(tracks)
days[day] = self._prepare_calendar(event, tracks)
speakers = {}
for track in event.sudo().track_ids:
speakers_name = u" – ".join(track.speaker_ids.mapped('name'))
speakers[track.id] = speakers_name
return request.render("website_event_track.agenda", {
'event': event,
'days': days,
'days_nbr': days_tracks_count,
'speakers': speakers,
'tag': tag
})
@http.route([
'''/event/<model("event.event", "[('show_tracks','=',1)]"):event>/track''',
'''/event/<model("event.event", "[('show_tracks','=',1)]"):event>/track/tag/<model("event.track.tag"):tag>'''
], type='http', auth="public", website=True)
def event_tracks(self, event, tag=None, **post):
searches = {}
if tag:
searches.update(tag=tag.id)
tracks = event.track_ids.filtered(lambda track: tag in track.tag_ids)
else:
tracks = event.track_ids
values = {
'event': event,
'main_object': event,
'tracks': tracks,
'tags': event.tracks_tag_ids,
'searches': searches,
'html2plaintext': html2plaintext
}
return request.render("website_event_track.tracks", values)
@http.route(['''/event/<model("event.event", "[('show_track_proposal','=',1)]"):event>/track_proposal'''], type='http', auth="public", website=True)
def event_track_proposal(self, event, **post):
return request.render("website_event_track.event_track_proposal", {'event': event})
@http.route(['/event/<model("event.event"):event>/track_proposal/post'], type='http', auth="public", methods=['POST'], website=True)
def event_track_proposal_post(self, event, **post):
tags = []
for tag in event.allowed_track_tag_ids:
if post.get('tag_' + str(tag.id)):
tags.append(tag.id)
track = request.env['event.track'].sudo().create({
'name': post['track_name'],
'partner_name': post['partner_name'],
'partner_email': post['email_from'],
'partner_phone': post['phone'],
'partner_biography': escape(post['biography']),
'event_id': event.id,
'tag_ids': [(6, 0, tags)],
'user_id': False,
'description': escape(post['description'])
})
if request.env.user != request.website.user_id:
track.sudo().message_subscribe_users(user_ids=request.env.user.ids)
else:
partner = request.env['res.partner'].sudo().search([('email', '=', post['email_from'])])
if partner:
track.sudo().message_subscribe(partner_ids=partner.ids)
return request.render("website_event_track.event_track_proposal_success", {'track': track, 'event': event})
| agpl-3.0 | 4,715,310,819,447,169,000 | 44.18797 | 168 | 0.563561 | false |
davidsminor/gaffer | python/GafferImageUI/ImageTransformUI.py | 1 | 2002 | ##########################################################################
#
# Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import GafferUI
import GafferImage
GafferUI.PlugValueWidget.registerCreator( GafferImage.ImageTransform.staticTypeId(), "transform", GafferUI.CompoundPlugValueWidget, collapsed=None )
| bsd-3-clause | -5,045,399,636,241,545,000 | 49.05 | 148 | 0.692807 | false |
pkleimert/hrpt | apps/pollster/models.py | 1 | 51517 | # -*- coding: utf-8 -*-
import warnings
from django.db import models, connection, transaction, IntegrityError, DatabaseError
from django.contrib.auth.models import User
from django.forms import ModelForm
from django.core.validators import RegexValidator
from cms.models import CMSPlugin
from xml.etree import ElementTree
from math import pi,cos,sin,log,exp,atan
from . import dynamicmodels, json
from .db.utils import get_db_type, convert_query_paramstyle
import os, re, shutil, warnings, datetime, csv
from django.conf import settings
DEG_TO_RAD = pi/180
RAD_TO_DEG = 180/pi
try:
import mapnik2 as mapnik
mapnik_version = 2
except:
try:
import mapnik
mapnik_version = 1
except ImportError:
mapnik_version = None
warnings.warn("No working version for library 'mapnik' found. Continuing without mapnik")
SURVEY_STATUS_CHOICES = (
('DRAFT', 'Draft'),
('PUBLISHED', 'Published'),
('UNPUBLISHED', 'Unpublished')
)
SURVEY_TRANSLATION_STATUS_CHOICES = (
('DRAFT', 'Draft'),
('PUBLISHED', 'Published')
)
CHART_STATUS_CHOICES = (
('DRAFT', 'Draft'),
('PUBLISHED', 'Published'),
)
QUESTION_TYPE_CHOICES = (
('builtin', 'Builtin'),
('text', 'Open Answer'),
('single-choice', 'Single Choice'),
('multiple-choice', 'Multiple Choice'),
('matrix-select', 'Matrix Select'),
('matrix-entry', 'Matrix Entry'),
)
CHART_SQLFILTER_CHOICES = (
('NONE', 'None'),
('USER', 'Current User'),
('PERSON', 'Current Person'),
)
IDENTIFIER_REGEX = r'^[a-zA-Z][a-zA-Z0-9_]*$'
IDENTIFIER_OPTION_REGEX = r'^[a-zA-Z0-9_]*$'
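# Extra SQL statements, keyed by database backend and survey shortname, executed when a
# survey with a matching shortname is published (see Survey.publish). For the "weekly"
# survey this (re)creates the pollster_health_status view, which classifies every stored
# response (ILI, common cold, allergy/hay fever, gastrointestinal, ...) from its symptom answers.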
SURVEY_EXTRA_SQL = {
'postgresql': {
'weekly': [
"""DROP VIEW IF EXISTS pollster_health_status""",
"""CREATE VIEW pollster_health_status AS
SELECT id as pollster_results_weekly_id,
case true
when "Q1_0"
then 'NO-SYMPTOMS'
when ("Q5" = 0 or "Q6b" = 0)
and ("Q1_1" or "Q1_2" or "Q6d" = 3 or "Q6d" = 4 or "Q6d" = 5 or "Q1_11" or "Q1_8" or "Q1_9")
and ("Q1_5" or "Q1_6" or "Q1_7")
then 'ILI'
when
(
(not "Q1_1") and (not "Q1_2")
and (("Q6d" = 0) or ("Q6d" is null))
and ("Q1_3" or "Q1_4" or "Q1_14")
and ("Q11" = 2)
) and (
case true when "Q1_17" then 1 else 0 end +
case true when "Q1_15" then 1 else 0 end +
case true when "Q1_16" then 1 else 0 end +
case true when "Q1_18" then 1 else 0 end >= 2
) then 'ALLERGY-or-HAY-FEVER-and-GASTROINTESTINAL'
when (not "Q1_1") and (not "Q1_2")
and (("Q6d" = 0) or ("Q6d" is null))
and ("Q1_3" or "Q1_4" or "Q1_14")
and ("Q11" = 2)
then 'ALLERGY-or-HAY-FEVER'
when
(
case true when "Q1_3" then 1 else 0 end +
case true when "Q1_4" then 1 else 0 end +
case true when "Q1_6" then 1 else 0 end +
case true when "Q1_5" then 1 else 0 end >= 2
-- note: common cold after all allergy-related branches
) and (
case true when "Q1_17" then 1 else 0 end +
case true when "Q1_15" then 1 else 0 end +
case true when "Q1_16" then 1 else 0 end +
case true when "Q1_18" then 1 else 0 end >= 2
) then 'COMMON-COLD-and-GASTROINTESTINAL'
when
case true when "Q1_3" then 1 else 0 end +
case true when "Q1_4" then 1 else 0 end +
case true when "Q1_6" then 1 else 0 end +
case true when "Q1_5" then 1 else 0 end >= 2
-- note: common cold after all allergy-related branches
then 'COMMON-COLD'
when
case true when "Q1_17" then 1 else 0 end +
case true when "Q1_15" then 1 else 0 end +
case true when "Q1_16" then 1 else 0 end +
case true when "Q1_18" then 1 else 0 end >= 2
then 'GASTROINTESTINAL'
else 'NON-SPECIFIC-SYMPTOMS'
end as status
FROM pollster_results_weekly"""
]
},
'sqlite': {
'weekly': [
"""DROP VIEW IF EXISTS pollster_health_status""",
"""CREATE VIEW pollster_health_status AS
SELECT id as pollster_results_weekly_id,
case 1
when Q1_0
then 'NO-SYMPTOMS'
when (Q5 = 0 or Q6b = 0)
and (Q1_1 or Q1_2 or Q6d = 3 or Q6d = 4 or Q6d = 5 or Q1_11 or Q1_8 or Q1_9)
and (Q1_5 or Q1_6 or Q1_7)
then 'ILI'
when
(
(not Q1_1) and (not Q1_2)
and ((Q6d = 0) or (Q6d is null))
and (Q1_3 or Q1_4 or Q1_14)
and (Q11 = 2)
) and (
case true when Q1_17 then 1 else 0 end +
case true when Q1_15 then 1 else 0 end +
case true when Q1_16 then 1 else 0 end +
case true when Q1_18 then 1 else 0 end >= 2
) then 'ALLERGY-or-HAY-FEVER-and-GASTROINTESTINAL'
when (not Q1_1) and (not Q1_2)
and ((Q6d = 0) or (Q6d is null))
and (Q1_3 or Q1_4 or Q1_14)
and (Q11 = 2)
then 'ALLERGY-or-HAY-FEVER'
when
(
case true when Q1_3 then 1 else 0 end +
case true when Q1_4 then 1 else 0 end +
case true when Q1_6 then 1 else 0 end +
case true when Q1_5 then 1 else 0 end >= 2
-- note: common cold after all allergy-related branches
) and (
case true when Q1_17 then 1 else 0 end +
case true when Q1_15 then 1 else 0 end +
case true when Q1_16 then 1 else 0 end +
case true when Q1_18 then 1 else 0 end >= 2
) then 'COMMON-COLD-and-GASTROINTESTINAL'
when
case true when Q1_3 then 1 else 0 end +
case true when Q1_4 then 1 else 0 end +
case true when Q1_6 then 1 else 0 end +
case true when Q1_5 then 1 else 0 end >= 2
-- note: common cold after all allergy-related branches
then 'COMMON-COLD'
when
case true when Q1_17 then 1 else 0 end +
case true when Q1_15 then 1 else 0 end +
case true when Q1_16 then 1 else 0 end +
case true when Q1_18 then 1 else 0 end >= 2
then 'GASTROINTESTINAL'
else 'NON-SPECIFIC-SYMPTOMS'
end as status
FROM pollster_results_weekly"""
]
}
}
def _get_or_default(queryset, default=None):
r = queryset[0:1]
if r:
return r[0]
return default
class Survey(models.Model):
parent = models.ForeignKey('self', db_index=True, blank=True, null=True)
title = models.CharField(max_length=255, blank=True, default='')
shortname = models.SlugField(max_length=255, default='')
version = models.SlugField(max_length=255, blank=True, default='')
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
status = models.CharField(max_length=255, default='DRAFT', choices=SURVEY_STATUS_CHOICES)
form = None
translation_survey = None
_standard_result_fields =[
('user', models.IntegerField(null=True, blank=True, verbose_name="User")),
('global_id', models.CharField(max_length=36, null=True, blank=True, verbose_name="Person")),
('channel', models.CharField(max_length=36, null=True, blank=True, verbose_name="Channel"))
]
@staticmethod
def get_by_shortname(shortname):
return Survey.objects.all().get(shortname=shortname, status="PUBLISHED")
@property
def translated_title(self):
if self.translation and self.translation.title:
return self.translation.title
return self.title
@property
def is_draft(self):
return self.status == 'DRAFT'
@property
def is_published(self):
return self.status == 'PUBLISHED'
@property
def is_unpublished(self):
return self.status == 'UNPUBLISHED'
@property
def is_editable(self):
return self.is_draft or self.is_unpublished
@property
def questions(self):
for question in self.question_set.all():
question.set_form(self.form)
question.set_translation_survey(self.translation_survey)
yield question
@property
def translation(self):
return self.translation_survey
@models.permalink
def get_absolute_url(self):
return ('pollster_survey_edit', [str(self.id)])
def __unicode__(self):
return "Survey #%d %s" % (self.id, self.title)
def get_table_name(self):
if self.is_published and not self.shortname:
raise RuntimeError('cannot generate tables for surveys with no shortname')
return 'results_'+str(self.shortname)
def get_last_participation_data(self, user_id, global_id):
model = self.as_model()
participation = model.objects\
.filter(user=user_id)\
.filter(global_id = global_id)\
.order_by('-timestamp')\
.values()
return _get_or_default(participation)
def as_model(self):
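        # Build a dynamic Django model for this survey's results table: the standard
        # user/global_id/channel columns plus one field per question data name.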
fields = []
fields.extend(Survey._standard_result_fields)
for question in self.questions:
fields += question.as_fields()
model = dynamicmodels.create(self.get_table_name(), fields=dict(fields), app_label='pollster')
return model
def as_form(self):
model = self.as_model()
questions = list(self.questions)
def clean(self):
for question in questions:
if question.is_multiple_choice and question.is_mandatory:
valid = any([self.cleaned_data.get(d, False) for d in question.data_names])
if not valid:
                        self._errors[question.data_name] = self.error_class(['At least one option should be selected'])
return self.cleaned_data
form = dynamicmodels.to_form(model, {'clean': clean})
for question in questions:
if question.is_mandatory and question.data_name in form.base_fields:
form.base_fields[question.data_name].required = True
return form
def set_form(self, form):
self.form = form
def set_translation_survey(self, translation_survey):
self.translation_survey = translation_survey
def check(self):
errors = []
if not self.shortname:
errors.append('Missing survey shortname')
elif not re.match(IDENTIFIER_REGEX, self.shortname):
errors.append('Invalid survey shortname "%s"' % (self.shortname,))
for question in self.questions:
errors.extend(question.check())
return errors
def publish(self):
if self.is_published:
return None
errors = self.check()
if errors:
return errors
# Unpublish other surveys with the same shortname.
for o in Survey.objects.filter(shortname=self.shortname, status='PUBLISHED'):
o.unpublish()
self.status = 'PUBLISHED'
model = self.as_model()
table = model._meta.db_table
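        # If a results table already exists, keep its data by renaming it with a
        # timestamp suffix before installing the freshly generated schema.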
if table in connection.introspection.table_names():
now = datetime.datetime.now()
backup = table+'_vx_'+format(now, '%Y%m%d%H%M%s')
connection.cursor().execute('ALTER TABLE '+table+' RENAME TO '+backup)
dynamicmodels.install(model)
db = get_db_type(connection)
for extra_sql in SURVEY_EXTRA_SQL[db].get(self.shortname, []):
connection.cursor().execute(extra_sql)
self.save()
return None
def unpublish(self):
if not self.is_published:
return
table = self.as_model()._meta.db_table
if table in connection.introspection.table_names():
now = datetime.datetime.now()
version = self.version or 0
backup = table+'_v'+str(version)+'_'+format(now, '%Y%m%d%H%M%s')
connection.cursor().execute('ALTER TABLE '+table+' RENAME TO '+backup)
self.status = 'UNPUBLISHED'
self.save()
def write_csv(self, writer):
model = self.as_model()
fields = model._meta.fields
writer.writerow([field.verbose_name or field.name for field in fields])
for result in model.objects.all():
row = []
for field in fields:
val = getattr(result, field.name)
if callable(val):
val = val()
if type(val) is unicode:
val = val.encode('utf-8')
row.append(val)
writer.writerow(row)
class RuleType(models.Model):
title = models.CharField(max_length=255, blank=True, default='')
js_class = models.CharField(max_length=255, unique=True)
def __unicode__(self):
return "RuleType #%d %s" % (self.id, self.title)
class QuestionDataType(models.Model):
title = models.CharField(max_length=255, blank=True, default='')
db_type = models.CharField(max_length=255)
css_class = models.CharField(max_length=255)
js_class = models.CharField(max_length=255, unique=True)
def __unicode__(self):
return "QuestionDataType #%d %s" % (self.id, self.title)
def as_field_type(self, verbose_name=None, regex=None):
import django.db.models
import db.models
field = eval(self.db_type)
field.verbose_name = verbose_name
if regex:
field.validators.append(RegexValidator(regex=regex))
return field
@staticmethod
def default_type():
return QuestionDataType.objects.filter(title = 'Text')[0]
@staticmethod
def default_timestamp_type():
return QuestionDataType.objects.filter(title = 'Timestamp')[0]
@property
def is_internal(self):
return self.title == 'Timestamp'
class VirtualOptionType(models.Model):
title = models.CharField(max_length=255, blank=True, default='')
question_data_type = models.ForeignKey(QuestionDataType)
js_class = models.CharField(max_length=255, unique=True)
def __unicode__(self):
return "VirtualOptionType #%d %s for %s" % (self.id, self.title, self.question_data_type.title)
class Question(models.Model):
survey = models.ForeignKey(Survey, db_index=True)
starts_hidden = models.BooleanField(default=False)
is_mandatory = models.BooleanField(default=False)
ordinal = models.IntegerField()
title = models.CharField(max_length=255, blank=True, default='')
description = models.TextField(blank=True, default='')
type = models.CharField(max_length=255, choices=QUESTION_TYPE_CHOICES)
data_type = models.ForeignKey(QuestionDataType)
open_option_data_type = models.ForeignKey(QuestionDataType, related_name="questions_with_open_option", null=True, blank=True)
data_name = models.CharField(max_length=255)
visual = models.CharField(max_length=255, blank=True, default='')
tags = models.CharField(max_length=255, blank=True, default='')
regex = models.CharField(max_length=1023, blank=True, default='')
error_message = models.TextField(blank=True, default='')
form = None
translation_survey = None
translation_question = None
@property
def translated_title(self):
if self.translation and self.translation.title:
return self.translation.title
return self.title
@property
def translated_description(self):
if self.translation and self.translation.description:
return self.translation.description
return self.description
@property
def translated_error_message(self):
if self.translation and self.translation.error_message:
return self.translation.error_message
return self.error_message
@property
def errors(self):
if not self.form:
return {}
errors = [(data_name, self.form.errors[data_name]) for data_name in self.data_names if data_name in self.form.errors]
if self.is_multiple_choice and self.data_name in self.form.errors:
errors.append((self.data_name, self.form.errors[self.data_name]))
return dict(errors)
@property
def rows(self):
for row in self.row_set.all():
row.set_translation_survey(self.translation_survey)
yield row
@property
def columns(self):
for column in self.column_set.all():
column.set_translation_survey(self.translation_survey)
yield column
@property
def rows_columns(self):
for row in self.rows:
yield (row, self._columns_for_row(row))
def _columns_for_row(self, row):
for column in self.columns:
column.set_row(row)
yield column
@property
def data_names(self):
return [data_name for data_name, data_type in self.as_fields()]
@property
def options(self):
for option in self.option_set.all():
option.set_form(self.form)
option.set_translation_survey(self.translation_survey)
yield option
@property
def translation(self):
return self.translation_question
@property
def css_classes(self):
c = ['question', 'question-'+self.type, self.data_type.css_class]
if self.starts_hidden:
c.append('starts-hidden')
if self.is_mandatory:
c.append('mandatory')
if self.errors:
c.append('error')
return c
@property
def form_value(self):
if not self.form:
return ''
return self.form.data.get(self.data_name, '')
@property
def is_builtin(self):
return self.type == 'builtin'
@property
def is_text(self):
return self.type == 'text'
@property
def is_single_choice(self):
return self.type == 'single-choice'
@property
def is_multiple_choice(self):
return self.type == 'multiple-choice'
@property
def is_matrix_select(self):
return self.type == 'matrix-select'
@property
def is_matrix_entry(self):
return self.type == 'matrix-entry'
@property
def is_visual_dropdown(self):
return self.visual == 'dropdown'
def __unicode__(self):
return "Question #%d %s" % (self.id, self.title)
class Meta:
ordering = ['survey', 'ordinal']
def data_name_for_row_column(self, row, column):
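        # Illustrative example: a question with data_name "Q7" maps row ordinal 2,
        # column ordinal 1 to the column name "Q7_multi_row2_col1".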
return '%s_multi_row%d_col%d' % (self.data_name, row.ordinal, column.ordinal)
def as_fields(self):
fields = []
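        # A single question can span several database columns: one field for
        # builtin/text/single-choice (plus a "<value>_open" column per open option),
        # one boolean per option for multiple-choice, and one column per
        # (row, column) cell for matrix questions.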
if self.type == 'builtin':
fields = [ (self.data_name, self.data_type.as_field_type(verbose_name=self.title)) ]
elif self.type == 'text':
fields = [ (self.data_name, self.data_type.as_field_type(verbose_name=self.title, regex=self.regex)) ]
elif self.type == 'single-choice':
open_option_data_type = self.open_option_data_type or self.data_type
fields = [ (self.data_name, self.data_type.as_field_type(verbose_name=self.title)) ]
for open_option in [o for o in self.option_set.all() if o.is_open]:
title_open = "%s: %s Open Answer" % (self.title, open_option.value)
fields.append( (open_option.open_option_data_name, open_option_data_type.as_field_type(verbose_name=title_open)) )
elif self.type == 'multiple-choice':
fields = []
for option in self.option_set.all():
title = "%s: %s" % (self.title, option.value)
fields.append( (option.data_name, models.BooleanField(verbose_name=title)) )
if option.is_open:
title_open = "%s: %s Open Answer" % (self.title, option.value)
fields.append( (option.open_option_data_name, option.open_option_data_type.as_field_type(verbose_name=title_open)) )
elif self.type in ('matrix-select', 'matrix-entry'):
fields = []
for row, columns in self.rows_columns:
for column in columns:
r = row.title or ("row %d" % row.ordinal)
c = column.title or ("column %d" % column.ordinal)
title = "%s (%s, %s)" % (self.title, r, c)
fields.append( (column.data_name, self.data_type.as_field_type(verbose_name=title)) )
else:
raise NotImplementedError(self.type)
return fields
def set_form(self, form):
self.form = form
def set_translation_survey(self, translation_survey):
self.translation_survey = translation_survey
if translation_survey:
r = translation_survey.translationquestion_set.all().filter(question=self)
default = TranslationQuestion(translation = translation_survey, question=self)
self.translation_question = _get_or_default(r, default)
def check(self):
errors = []
if not self.data_name:
errors.append('Missing data name for question "%s"' % (self.title, ))
elif not re.match(IDENTIFIER_REGEX, self.data_name):
errors.append('Invalid data name "%s" for question "%s"' % (self.data_name, self.title))
values = {}
for option in self.options:
errors.extend(option.check())
values[option.value] = values.get(option.value, 0) + 1
if self.type == 'multiple-choice':
dups = [val for val, count in values.items() if count > 1]
for dup in dups:
errors.append('Duplicated value %s in question %s' % (dup, self.title))
return errors
class QuestionRow(models.Model):
question = models.ForeignKey(Question, related_name="row_set", db_index=True)
ordinal = models.IntegerField()
title = models.CharField(max_length=255, blank=True, default='')
translation_survey = None
translation_row = None
class Meta:
ordering = ['question', 'ordinal']
def __unicode__(self):
return "QuestionRow #%d %s" % (self.id, self.title)
@property
def translated_title(self):
if self.translation and self.translation.title:
return self.translation.title
return self.title
@property
def translation(self):
return self.translation_row
def set_translation_survey(self, translation_survey):
self.translation_survey = translation_survey
if translation_survey:
r = translation_survey.translationquestionrow_set.all().filter(row=self)
default = TranslationQuestionRow(translation = translation_survey, row=self)
self.translation_row = _get_or_default(r, default)
class QuestionColumn(models.Model):
question = models.ForeignKey(Question, related_name="column_set", db_index=True)
ordinal = models.IntegerField()
title = models.CharField(max_length=255, blank=True, default='')
translation_survey = None
translation_column = None
row = None
class Meta:
ordering = ['question', 'ordinal']
def __unicode__(self):
return "QuestionColumn #%d %s" % (self.id, self.title)
@property
def translated_title(self):
if self.translation and self.translation.title:
return self.translation.title
return self.title
@property
def translation(self):
return self.translation_column
def set_translation_survey(self, translation_survey):
self.translation_survey = translation_survey
if translation_survey:
r = translation_survey.translationquestioncolumn_set.all().filter(column=self)
default = TranslationQuestionColumn(translation = translation_survey, column=self)
self.translation_column = _get_or_default(r, default)
def set_row(self, row):
self.row = row
@property
def options(self):
for option in self.question.options:
if option.row and option.row != self.row:
continue
if option.column and option.column != self:
continue
option.set_row_column(self.row, self)
option.set_translation_survey(self.translation_survey)
# TODO: We need a form to reset the selects to user's values.
# option.set_form(self.form)
yield option
@property
def data_name(self):
if not self.row:
raise NotImplementedError('use Question.rows_columns() to get the right data_name here')
return self.question.data_name_for_row_column(self.row, self)
class Option(models.Model):
question = models.ForeignKey(Question, db_index=True)
clone = models.ForeignKey('self', db_index=True, blank=True, null=True)
row = models.ForeignKey(QuestionRow, blank=True, null=True)
column = models.ForeignKey(QuestionColumn, blank=True, null=True)
is_virtual = models.BooleanField(default=False)
is_open = models.BooleanField(default=False)
starts_hidden = models.BooleanField(default=False)
ordinal = models.IntegerField()
text = models.CharField(max_length=4095, blank=True, default='')
group = models.CharField(max_length=255, blank=True, default='')
value = models.CharField(max_length=255, default='')
description = models.TextField(blank=True, default='')
virtual_type = models.ForeignKey(VirtualOptionType, blank=True, null=True)
virtual_inf = models.CharField(max_length=255, blank=True, default='')
virtual_sup = models.CharField(max_length=255, blank=True, default='')
virtual_regex = models.CharField(max_length=255, blank=True, default='')
form = None
translation_survey = None
translation_option = None
current_row_column = (None, None)
@property
def translated_text(self):
if self.translation and self.translation.text:
return self.translation.text
return self.text
@property
def translated_description(self):
if self.translation and self.translation.description:
return self.translation.description
return self.description
@property
def data_name(self):
if self.question.type in ('text', 'single-choice'):
return self.question.data_name
elif self.question.type == 'multiple-choice':
return self.question.data_name+'_'+self.value
elif self.question.type in ('matrix-select', 'matrix-entry'):
row = self.row or self.current_row_column[0]
column = self.column or self.current_row_column[1]
return self.question.data_name_for_row_column(row, column)
else:
raise NotImplementedError(self.question.type)
@property
def translation(self):
return self.translation_option
@property
def open_option_data_name(self):
return self.question.data_name+'_'+self.value+'_open'
@property
def open_option_data_type(self):
return self.question.open_option_data_type or self.question.data_type
def __unicode__(self):
return 'Option #%d %s' % (self.id, self.value)
class Meta:
ordering = ['question', 'ordinal']
@property
def form_value(self):
if not self.form:
return ''
return self.form.data.get(self.data_name, '')
@property
def open_option_data_form_value(self):
if not self.form:
return ''
return self.form.data.get(self.open_option_data_name, '')
@property
def form_is_checked(self):
if self.question.type in ('text', 'single-choice'):
return self.form_value == self.value
elif self.question.type == 'multiple-choice':
return bool(self.form_value)
elif self.question.type in ('matrix-select', 'matrix-entry'):
return self.form_value == self.value
else:
raise NotImplementedError(self.question.type)
def set_form(self, form):
self.form = form
def set_translation_survey(self, translation_survey):
self.translation_survey = translation_survey
if translation_survey:
r = translation_survey.translationoption_set.all().filter(option=self)
default = TranslationOption(translation = translation_survey, option=self)
self.translation_option = _get_or_default(r, default)
def set_row_column(self, row, column):
self.current_row_column = (row, column)
def check(self):
errors = []
if self.is_virtual:
if not self.virtual_inf and not self.virtual_sup and not self.virtual_regex:
errors.append('Missing parameters for derived value in question "%s"' % (self.question.title, ))
else:
if not self.text:
errors.append('Empty text for option in question "%s"' % (self.question.title, ))
if not self.value:
errors.append('Missing value for option "%s" in question "%s"' % (self.text, self.question.title))
elif self.question.type == 'multiple-choice' and not re.match(IDENTIFIER_OPTION_REGEX, self.value):
errors.append('Invalid value "%s" for option "%s" in question "%s"' % (self.value, self.text, self.question.title))
return errors
class Rule(models.Model):
rule_type = models.ForeignKey(RuleType)
is_sufficient = models.BooleanField(default=True)
subject_question = models.ForeignKey(Question, related_name='subject_of_rules', db_index=True)
subject_options = models.ManyToManyField(Option, related_name='subject_of_rules', limit_choices_to = {'question': subject_question})
object_question = models.ForeignKey(Question, related_name='object_of_rules', blank=True, null=True)
object_options = models.ManyToManyField(Option, related_name='object_of_rules', limit_choices_to = {'question': object_question})
def js_class(self):
return self.rule_type.js_class
def __unicode__(self):
return 'Rule #%d' % (self.id)
# I18n models
class TranslationSurvey(models.Model):
survey = models.ForeignKey(Survey, db_index=True)
language = models.CharField(max_length=3, db_index=True)
title = models.CharField(max_length=255, blank=True, default='')
status = models.CharField(max_length=255, default='DRAFT', choices=SURVEY_TRANSLATION_STATUS_CHOICES)
class Meta:
verbose_name = 'Translation'
ordering = ['survey', 'language']
unique_together = ('survey', 'language')
@models.permalink
def get_absolute_url(self):
return ('pollster_survey_translation_edit', [str(self.survey.id), self.language])
def __unicode__(self):
return "TranslationSurvey(%s) for %s" % (self.language, self.survey)
def as_form(self, data=None):
class TranslationSurveyForm(ModelForm):
class Meta:
model = TranslationSurvey
fields = ['title', 'status']
return TranslationSurveyForm(data, instance=self, prefix="survey")
class TranslationQuestion(models.Model):
translation = models.ForeignKey(TranslationSurvey, db_index=True)
question = models.ForeignKey(Question, db_index=True)
title = models.CharField(max_length=255, blank=True, default='')
description = models.TextField(blank=True, default='')
error_message = models.TextField(blank=True, default='')
class Meta:
ordering = ['translation', 'question']
unique_together = ('translation', 'question')
def __unicode__(self):
return "TranslationQuestion(%s) for %s" % (self.translation.language, self.question)
def as_form(self, data=None):
class TranslationQuestionForm(ModelForm):
class Meta:
model = TranslationQuestion
fields = ['title', 'description', 'error_message']
return TranslationQuestionForm(data, instance=self, prefix="question_%s"%(self.id,))
class TranslationQuestionRow(models.Model):
translation = models.ForeignKey(TranslationSurvey, db_index=True)
row = models.ForeignKey(QuestionRow, db_index=True)
title = models.CharField(max_length=255, blank=True, default='')
class Meta:
ordering = ['translation', 'row']
unique_together = ('translation', 'row')
def __unicode__(self):
return "TranslationQuestionRow(%s) for %s" % (self.translation.language, self.row)
def as_form(self, data=None):
class TranslationRowForm(ModelForm):
class Meta:
model = TranslationQuestionRow
fields = ['title']
return TranslationRowForm(data, instance=self, prefix="row_%s"%(self.id,))
class TranslationQuestionColumn(models.Model):
translation = models.ForeignKey(TranslationSurvey, db_index=True)
column = models.ForeignKey(QuestionColumn, db_index=True)
title = models.CharField(max_length=255, blank=True, default='')
class Meta:
ordering = ['translation', 'column']
unique_together = ('translation', 'column')
def __unicode__(self):
return "TranslationQuestionColumn(%s) for %s" % (self.translation.language, self.column)
def as_form(self, data=None):
class TranslationColumnForm(ModelForm):
class Meta:
model = TranslationQuestionColumn
fields = ['title']
return TranslationColumnForm(data, instance=self, prefix="column_%s"%(self.id,))
class TranslationOption(models.Model):
translation = models.ForeignKey(TranslationSurvey, db_index=True)
option = models.ForeignKey(Option, db_index=True)
text = models.CharField(max_length=4095, blank=True, default='')
description = models.TextField(blank=True, default='')
class Meta:
ordering = ['translation', 'option']
unique_together = ('translation', 'option')
def __unicode__(self):
return "TranslationOption(%s) for %s" % (self.translation.language, self.option)
def as_form(self, data=None):
class TranslationOptionForm(ModelForm):
class Meta:
model = TranslationOption
fields = ['text', 'description']
return TranslationOptionForm(data, instance=self, prefix="option_%s"%(self.id,))
class ChartType(models.Model):
shortname = models.SlugField(max_length=255, unique=True)
description = models.CharField(max_length=255)
def __unicode__(self):
return self.description or self.shortname
class Chart(models.Model):
survey = models.ForeignKey(Survey, db_index=True)
type = models.ForeignKey(ChartType, db_index=True)
shortname = models.SlugField(max_length=255)
chartwrapper = models.TextField(blank=True, default='')
sqlsource = models.TextField(blank=True, default='', verbose_name="SQL Source Query")
sqlfilter = models.CharField(max_length=255, default='NONE', choices=CHART_SQLFILTER_CHOICES, verbose_name="Results Filter")
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
status = models.CharField(max_length=255, default='DRAFT', choices=CHART_STATUS_CHOICES)
geotable = models.CharField(max_length=255, default='pollster_zip_codes', choices=settings.GEOMETRY_TABLES)
class Meta:
ordering = ['survey', 'shortname']
unique_together = ('survey', 'shortname')
def __unicode__(self):
return "Chart %s for %s" % (self.shortname, self.survey)
@models.permalink
def get_absolute_url(self):
return ('pollster_survey_chart_edit', [str(self.survey.id), self.shortname])
@property
def is_draft(self):
return self.status == 'DRAFT'
@property
def is_published(self):
return self.status == 'PUBLISHED'
@property
def has_data(self):
if not self.sqlsource:
return False
else:
return True
def to_json(self, user_id, global_id):
data = {}
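        # Two cases: "google-charts" charts serialize their results as a Google
        # DataTable (cols/rows) attached to the stored chartwrapper options (defaulting
        # to a Table chart), while map charts return the stored bounds plus, when the
        # profile survey exposes a zip code for this user, a map center looked up from it.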
if self.type.shortname == "google-charts":
data[ "chartType"] = "Table"
if self.chartwrapper:
data = json.loads(self.chartwrapper)
descriptions, cells = self.load_data(user_id, global_id)
cols = [{"id": desc[0], "label": desc[0], "type": "number"} for desc in descriptions]
rows = [{"c": [{"v": v} for v in c]} for c in cells]
data["dataTable"] = { "cols": cols, "rows": rows }
else:
if self.chartwrapper:
data["bounds"] = json.loads(self.chartwrapper)
try:
shortname = settings.POLLSTER_USER_PROFILE_SURVEY
survey = Survey.objects.get(shortname=shortname, status='PUBLISHED')
lpd = survey.get_last_participation_data(user_id, global_id)
if lpd and hasattr(settings, 'POLLSTER_USER_ZIP_CODE_DATA_NAME'):
zip_code = lpd.get(settings.POLLSTER_USER_ZIP_CODE_DATA_NAME)
if zip_code is not None:
zip_code = str(zip_code).upper()
country = None
if hasattr(settings, 'POLLSTER_USER_COUNTRY_DATA_NAME'):
country = lpd.get(settings.POLLSTER_USER_COUNTRY_DATA_NAME)
if country is not None:
country = str(country).upper()
data["center"] = self.load_zip_coords(zip_code, country)
except:
pass
return json.dumps(data)
def get_map_click(self, lat, lng):
result = {}
skip_cols = ("ogc_fid", "color", "geometry")
description, data = self.load_info(lat, lng)
if data and len(data) > 0:
for i in range(len(data[0])):
if description[i][0] not in skip_cols:
result[description[i][0]] = str(data[0][i])
return json.dumps(result)
def get_map_tile(self, user_id, global_id, z, x, y):
filename = self.get_map_tile_filename(z, x, y)
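        # Tiles are cached on disk per (z, x, y); charts filtered by user or person
        # get a per-user / per-global-id suffix so tiles are never shared across accounts.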
if self.sqlfilter == "USER" and user_id:
filename = filename + "_user_" + str(user_id)
elif self.sqlfilter == "PERSON" and global_id:
filename = filename + "_gid_" + global_id
if not os.path.exists(filename):
self.generate_map_tile(self.generate_mapnik_map(user_id, global_id), filename, z, x, y)
return open(filename).read()
def generate_map_tile(self, m, filename, z, x, y):
# Code taken from OSM generate_tiles.py
proj = GoogleProjection()
mprj = mapnik.Projection(m.srs)
p0 = (x * 256, (y + 1) * 256)
p1 = ((x + 1) * 256, y * 256)
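        # Convert the tile's pixel corners to lat/lon with the spherical-mercator
        # helper, then forward-project them into the map SRS to get the 256x256 bbox.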
l0 = proj.fromPixelToLL(p0, z);
l1 = proj.fromPixelToLL(p1, z);
c0 = mprj.forward(mapnik.Coord(l0[0], l0[1]))
c1 = mprj.forward(mapnik.Coord(l1[0], l1[1]))
if hasattr(mapnik,'mapnik_version') and mapnik.mapnik_version() >= 800:
bbox = mapnik.Box2d(c0.x, c0.y, c1.x, c1.y)
else:
bbox = mapnik.Envelope(c0.x, c0.y, c1.x, c1.y)
m.resize(256, 256)
m.zoom_to_box(bbox)
im = mapnik.Image(256, 256)
mapnik.render(m, im)
# See https://github.com/mapnik/mapnik/wiki/OutputFormats for output
# formats and special parameters. The default here is 32 bit PNG with 8
# bit per component and alpha channel.
if mapnik_version == 2:
im.save(str(filename), "png32")
else:
im.save(str(filename), "png")
def generate_mapnik_map(self, user_id, global_id):
m = mapnik.Map(256, 256)
style = self.generate_mapnik_style(user_id, global_id)
m.background = mapnik.Color("transparent")
m.append_style("ZIP_CODES STYLE", style)
m.srs = "+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs +over"
layer = mapnik.Layer('ZIP_CODES')
layer.datasource = self.create_mapnik_datasource(user_id, global_id)
layer.styles.append("ZIP_CODES STYLE")
m.layers.append(layer)
return m
def generate_mapnik_style(self, user_id, global_id):
style = mapnik.Style()
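        # One style rule per distinct color returned by the chart query: features whose
        # "color" attribute equals that value get a semi-transparent fill and outline.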
for color in self.load_colors(user_id, global_id):
# If the color can't be parsed, use red.
try:
c = mapnik.Color(str(color))
except:
c = mapnik.Color('#ff0000')
line = mapnik.LineSymbolizer(c, 1.5)
line.stroke.opacity = 0.7
poly = mapnik.PolygonSymbolizer(c)
poly.fill_opacity = 0.5
rule = mapnik.Rule()
rule.filter = mapnik.Filter(str("[color] = '%s'" % (color,)))
rule.symbols.extend([poly,line])
style.rules.append(rule)
return style
def create_mapnik_datasource(self, user_id, global_id):
# First create the SQL query that is a join between pollster_zip_codes and
# the chart query as created by the user; then create an appropriate datasource.
if global_id and re.findall('[^0-9A-Za-z-]', global_id):
raise Exception("invalid global_id "+global_id)
table = """SELECT * FROM %s""" % (self.get_view_name(),)
if self.sqlfilter == 'USER' :
table += """ WHERE "user" = %d""" % (user_id,)
elif self.sqlfilter == 'PERSON':
table += """ WHERE "user" = %d AND global_id = '%s'""" % (user_id, global_id)
table = "(" + table + ") AS ZIP_CODES"
if settings.DATABASES["default"]["ENGINE"] == "django.db.backends.sqlite3":
name = settings.DATABASES["default"]["NAME"]
            return mapnik.SQLite(file=name, wkb_format="spatialite",
geometry_field="geometry", estimate_extent=False, table=table)
if settings.DATABASES["default"]["ENGINE"] == "django.db.backends.postgresql_psycopg2":
name = settings.DATABASES["default"]["NAME"]
host = settings.DATABASES["default"]["HOST"]
port = settings.DATABASES["default"]["PORT"]
username = settings.DATABASES["default"]["USER"]
password = settings.DATABASES["default"]["PASSWORD"]
return mapnik.PostGIS(host=host, port=port, user=username, password=password, dbname=name,
geometry_field="geometry", estimate_extent=False, table=table)
def get_map_tile_base(self):
return "%s/_pollster_tile_cache/survey_%s/%s" % (settings.POLLSTER_CACHE_PATH, self.survey.id, self.shortname)
def get_map_tile_filename(self, z, x, y):
filename = "%s/%s/%s_%s" % (self.get_map_tile_base(), z, x, y)
pathname = os.path.dirname(filename)
if not os.path.exists(pathname):
try:
os.makedirs(pathname)
except OSError:
# Another thread created the directory in the meantime: just go on.
pass
return filename
def clear_map_tile_cache(self):
try:
shutil.rmtree(self.get_map_tile_base())
except:
pass
def get_table_name(self):
return 'pollster_charts_'+str(self.survey.shortname)+'_'+str(self.shortname)
def get_view_name(self):
return self.get_table_name() + "_view"
def update_table(self):
table_query = self.sqlsource
geo_table = self.geotable
if table_query:
table = self.get_table_name()
view = self.get_view_name()
if re.search(r'\bzip_code_country\b', table_query):
view_query = """SELECT A.*, B.id AS OGC_FID, B.geometry
FROM %s B, (SELECT * FROM %s) A
WHERE upper(A.zip_code_key) = upper(B.zip_code_key)
AND upper(A.zip_code_country) = upper(B.country)""" % (geo_table, table,)
else:
view_query = """SELECT A.*, B.id AS OGC_FID, B.geometry
FROM %s B, (SELECT * FROM %s) A
WHERE upper(A.zip_code_key) = upper(B.zip_code_key)""" % (geo_table, table,)
cursor = connection.cursor()
#try:
cursor.execute("DROP VIEW IF EXISTS %s" % (view,))
cursor.execute("DROP TABLE IF EXISTS %s" % (table,))
cursor.execute("CREATE TABLE %s AS %s" % (table, table_query))
if self.type.shortname != 'google-charts':
cursor.execute("CREATE VIEW %s AS %s" % (view, view_query))
transaction.commit_unless_managed()
self.clear_map_tile_cache()
return True
#except IntegrityError:
# return False
#except DatabaseError:
# return False
return False
def update_data(self):
table_query = self.sqlsource
if table_query:
table = self.get_table_name()
cursor = connection.cursor()
try:
cursor.execute("DELETE FROM %s" % (table,))
cursor.execute("INSERT INTO %s %s" % (table, table_query))
transaction.commit_unless_managed()
self.clear_map_tile_cache()
return True
except IntegrityError:
return False
except DatabaseError:
return False
return False
def load_data(self, user_id, global_id):
table = self.get_table_name()
query = "SELECT * FROM %s" % (table,)
if self.sqlfilter == 'USER' :
query += """ WHERE "user" = %(user_id)s"""
elif self.sqlfilter == 'PERSON':
query += """ WHERE "user" = %(user_id)s AND global_id = %(global_id)s"""
params = { 'user_id': user_id, 'global_id': global_id }
query = convert_query_paramstyle(connection, query, params)
try:
cursor = connection.cursor()
cursor.execute(query, params)
return (cursor.description, cursor.fetchall())
except DatabaseError, e:
return ((('Error',),), ((str(e),),))
def load_colors(self, user_id, global_id):
table = self.get_table_name()
query = """SELECT DISTINCT color FROM %s""" % (table,)
if self.sqlfilter == 'USER' :
query += """ WHERE "user" = %(user_id)s"""
elif self.sqlfilter == 'PERSON':
query += """ WHERE "user" = %(user_id)s AND global_id = %(global_id)s"""
params = { 'user_id': user_id, 'global_id': global_id }
query = convert_query_paramstyle(connection, query, params)
try:
cursor = connection.cursor()
cursor.execute(query, params)
return [x[0] for x in cursor.fetchall()]
except DatabaseError, e:
# If the SQL query is wrong we just return 'red'. We don't try to pop
# up a warning because this probably is an async Javascript call: the
# query error should be shown by the map editor.
return ['#ff0000']
def load_info(self, lat, lng):
view = self.get_view_name()
query = "SELECT * FROM %s WHERE ST_Contains(geometry, 'SRID=4326;POINT(%%s %%s)')" % (view,)
try:
cursor = connection.cursor()
cursor.execute(query, (lng, lat))
return (cursor.description, cursor.fetchall())
except DatabaseError, e:
return (None, [])
def load_zip_coords(self, zip_code_key, zip_code_country=None):
geo_table = self.geotable
if zip_code_country:
query = """SELECT ST_Y(ST_Centroid(geometry)) AS lat, ST_X(ST_Centroid(geometry)) AS lng
FROM """ + geo_table + """ WHERE zip_code_key = %s AND country = %s"""
args = (zip_code_key, zip_code_country)
else:
query = """SELECT ST_Y(ST_Centroid(geometry)) AS lat, ST_X(ST_Centroid(geometry)) AS lng
FROM """ + geo_table + """ WHERE zip_code_key = %s"""
args = (zip_code_key,)
try:
cursor = connection.cursor()
cursor.execute(query, args)
data = cursor.fetchall()
if len(data) > 0:
return {"lat": data[0][0], "lng": data[0][1]}
else:
return {}
except DatabaseError, e:
return {}
class GoogleProjection:
def __init__(self, levels=25):
self.Bc = []
self.Cc = []
self.zc = []
self.Ac = []
c = 256
for d in range(0,levels):
e = c/2;
self.Bc.append(c/360.0)
self.Cc.append(c/(2 * pi))
self.zc.append((e,e))
self.Ac.append(c)
c *= 2
def fromLLtoPixel(self,ll,zoom):
d = self.zc[zoom]
e = round(d[0] + ll[0] * self.Bc[zoom])
f = min(max(sin(DEG_TO_RAD * ll[1]),-0.9999),0.9999)
g = round(d[1] + 0.5*log((1+f)/(1-f))*-self.Cc[zoom])
return (e,g)
def fromPixelToLL(self,px,zoom):
e = self.zc[zoom]
f = (px[0] - e[0])/self.Bc[zoom]
g = (px[1] - e[1])/-self.Cc[zoom]
h = RAD_TO_DEG * ( 2 * atan(exp(g)) - 0.5 * pi)
return (f,h)
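# Illustrative helper (added note, not part of the original module): shows how GoogleProjection
# combines with the 256px tile size used by generate_map_tile to locate the slippy-map tile
# covering a WGS84 longitude/latitude at a given zoom. The name is an assumption and nothing
# in this file calls it.
def _example_tile_for_lonlat(lon, lat, zoom):
    px = GoogleProjection().fromLLtoPixel((lon, lat), zoom)
    return (int(px[0] // 256), int(px[1] // 256))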
class SurveyChartPlugin(CMSPlugin):
chart = models.ForeignKey(Chart)
| agpl-3.0 | 7,208,818,112,332,948,000 | 38.506902 | 136 | 0.569987 | false |
sigurdga/samklang-blog | samklang_blog/models.py | 1 | 2547 | from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User, Group
from django.contrib.sites.models import Site
from taggit.managers import TaggableManager
from samklang_utils import markdown, slugify
from samklang_utils.managers import PermissionManager
from samklang_blog.managers import LiveEntryManager
class Entry(models.Model):
"""
Blog Entry.
Specify which image class to use in settings.
Example:
BLOG_IMAGE_MODEL = { 'app': 'blog', 'model': 'image' }
"""
# core fields
title = models.CharField(_("title"), max_length=50, help_text=_("Maximum 50 characters."))
body = models.TextField(_("contents"), help_text=_("Content written in markdown syntax."))
pub_date = models.DateTimeField(blank=True, null=True, verbose_name=_("publish date"), help_text=_("Date from which the entry is shown live. Blank = draft."))
pub_enddate = models.DateTimeField(blank=True, null=True, verbose_name=_("withdrawn date"), help_text=_("Date from which the entry is no longer accessible. Blank = live forever."))
updated_date = models.DateTimeField(auto_now=True)
# fields to store generated html
body_html = models.TextField(editable=False, blank=True)
# metadata
site = models.ForeignKey(Site, verbose_name=_('site'))
user = models.ForeignKey(User, verbose_name=_('user'))
group = models.ForeignKey(Group, verbose_name=_('created for'), null=True, blank=True)
slug = models.SlugField(unique_for_date='pub_date', help_text=_("Suggested value generated from title. Must be unique."))
tags = TaggableManager(_('tags'), blank=True, help_text=_('A comma-separated list of tags.'))
# managers, first one is default
objects = PermissionManager()
live = LiveEntryManager()
class Meta:
ordering = ['-pub_date']
verbose_name_plural = _("entries")
db_table = 'samklang_blog_entry'
def save(self, *args, **kwargs):
# convert markdown to html and store it
self.body_html = markdown(self.body)
self.slug = slugify(self.title)
super(Entry, self).save(*args, **kwargs)
def __unicode__(self):
return u"%s, %s" % (self.title, self.body[0:50])
@models.permalink
def get_absolute_url(self):
return ('blog-entry-detail', (), {
'year': self.pub_date.strftime("%Y"),
'month': self.pub_date.strftime("%m"),
'day': self.pub_date.strftime("%d"),
'slug': self.slug
})
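# Illustrative usage (added note, not part of the original app; the call below is an assumption):
# published entries are normally fetched through the LiveEntryManager bound above, e.g.
#   Entry.live.all()
# while Entry.objects goes through the permission-aware PermissionManager.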
| agpl-3.0 | -6,131,094,714,866,915,000 | 40.754098 | 184 | 0.663133 | false |
uknomecarlos/SMARTAlarmSW | PutTogether/main.py | 1 | 4200 | from PointOfInterest import *
from path import *
from xbee import ZigBee
from WirelessFunctions import *
import time
import serial
import Queue
import termios, fcntl, sys, os
fd = sys.stdin.fileno()
oldterm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
oldflags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
# on the Raspberry Pi the serial port is ttyAMA0
PORT = '/dev/serial0'
#PORT = '/dev/ttyAMA0'
BAUD_RATE = 9600
# Open serial port
ser = serial.Serial(PORT, BAUD_RATE)
# Set up a queue to hold all incoming packets
packets = Queue.Queue()
# Create array of POI and set up alarm layout
def set_poi(my_poi, vis, num_alarm, num_exit):
# set up POI as wall, it's a placeholder, to place walls adjacent to alarms
my_poi += [PointOfInterest(0, "wall")]
# Set up five POIs as type alarm
for i in range(1, num_alarm + 1):
temp_poi = PointOfInterest(i, "alarm")
my_poi += [temp_poi]
# Set up 2 POIs as type exit
for i in range(num_alarm + 1, num_alarm + num_exit + 1):
my_poi += [PointOfInterest(i, "exit")]
for poi in my_poi:
vis += [False]
# Sets the wireless addresses for all 5 alarms
def set_addresses(all_pois):
# This is set according to the address of the xbee router
# connected to each alarm
all_pois[1].set_address('\x00\x13\xa2\x00\x41\x67\x19\x2c')
all_pois[2].set_address('\x00\x13\xa2\x00\x41\x74\x20\xa9')
all_pois[3].set_address('\x00\x13\xa2\x00\x41\x74\x20\xb1')
all_pois[4].set_address('\x00\x13\xa2\x00\x41\x74\x20\xa5')
all_pois[5].set_address('\x00\x13\xa2\x00\x41\x74\x20\xbb')
# this is a callback function. When a message
# comes in, this function will receive the data
def message_received(data):
packets.put(data, block=False)
print 'gotta packet'
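# Added note (not in the original source): message_received runs on the ZigBee library's
# receive thread, so it only enqueues the frame; the main loop in test_case1 drains the
# `packets` queue from the main thread, keeping all packet handling single-threaded.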
def test_case1(zb):
# 5 alarms, Straight line, exit on either side, middle alarm (3) detects fire
# From Fig. 46 (p. 116) in "SMART Alarm System SD1 Documentation.pdf"
all_pois = [] # list containing every POI in system
visited = [] # a boolean value corresponding to each POI and it's ID
set_poi(all_pois, visited, 5, 2)
wall = all_pois[0]
set_addresses(all_pois)
# initialize the connections from one alarm to another
all_pois[1].set_all(all_pois[6], wall, all_pois[2])
all_pois[2].set_all(all_pois[1], wall, all_pois[3])
all_pois[3].set_all(all_pois[2], wall, all_pois[4])
all_pois[4].set_all(all_pois[3], wall, all_pois[5])
all_pois[5].set_all(all_pois[4], wall, all_pois[7])
print " "
print "Test Case 1: "
print " "
try:
while True:
try:
time.sleep(0.1)
if packets.qsize() > 0:
print "got somethin"
# got a packet from recv thread
# See, the receive thread gets them
# puts them on a queue and here is
# where I pick them off to use
newPacket = packets.get_nowait()
# now go dismantle the packet
# and use it.
handlePacket(newPacket, all_pois, visited, zb)
except KeyboardInterrupt:
break
try:
c = sys.stdin.read(1)
print "Got character", repr(c)
if c == 'r':
signalReset(zb, all_pois)
except IOError: pass
finally:
termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
fcntl.fcntl(fd, fcntl.F_SETFL, oldflags)
# halt() must be called before closing the serial
# port in order to ensure proper thread shutdown
zb.halt()
        ser.close()
# main function
def main():
# Create XBee library API object, which spawns a new thread
zb = ZigBee(ser, callback=message_received)
print 'starting'
test_case1(zb)
if __name__ == "__main__":
main()
| mit | 7,045,585,730,481,047,000 | 31.307692 | 81 | 0.596905 | false |
hpbader42/Klampt | Python/klampt/plan/cspace.py | 1 | 16996 | """This module provides convenient access to the motionplanning module
functionality by defining the CSpace and MotionPlan classes."""
import motionplanning
import random
class CSpace:
"""Used alongside MotionPlan to define a configuration space for
motion planning.
Attributes:
- eps: the collision tolerance used for checking edges, in the units
defined by the distance(a,b) method. (by default, euclidean distance)
- bound: a list of lower and upper bounds on the space [(l1,u1),...,(ld,ud)]
where d is the dimension of the configuration space.
- properties: a map of properties that may be used by a planner. See below
documentation for more detail.
Internally used attributes:
- cspace: a motionplanning module CSpaceInterface object
- feasibilityTests: a list of one-argument functions that test feasibility
of this configuration space's constraints. E.g.,
if you have a collision tester that returns True if a configuration is in
collision, and also want to check bounds, you can set this to a list:
    [self.inBounds, lambda x: not self.collides(x)]
You should not write this directly but instead use addFeasibilityTest.
- feasibilityTestNames: a list of names of feasibility tests. You should
not write this directly but instead use addFeasibilityTest.
To define a custom CSpace, subclasses will need to override:
- *feasible(x): returns true if the vector x is in the feasible space. By
default calls each function in self.feasibilityTests, which by default
only tests bound constraints.
- *sample(): returns a new vector x from a superset of the feasible space
- *sampleneighborhood(c,r): returns a new vector x from a neighborhood of c
with radius r
- *visible(a,b): returns true if the path between a and b is feasible
- *distance(a,b): return a distance between a and b
- *interpolate(a,b,u): interpolate between a, b with parameter u
(* indicates an optional override.)
To avoid memory leaks, CSpace.close() or motionplanning.destroy() must
be called when you are done. (The latter deallocates all previously
created cspaces and planners)
If sample() is not defined, then subclasses should set self.bound to be a
list of pairs defining an axis-aligned bounding box. The setBounds method
is a convenient way of defining this.
If visible() is not defined, then paths are checked by subdivision, with
the collision tolerance self.eps.
To help planners know a bit more about the CSpace, you can set the
self.properties member to a map from strings to values. Useful values
are
- euclidean (0 or 1): indicates a euclidean space
- geodesic (0 or 1): indicates whether the interpolation is along
geodesics.
- volume (real): a size of the space
- minimum, maximum (real array): bounds on the space
- metric (string): the metric used by distance, can be "euclidean",
"weighted euclidean", "manhattan", "weighted manhattan", "Linf", etc.
- metricWeights (real array): weights used by weighted metrics
volume is necessary for Lazy PRM* and Lazy RRG* to work.
metricWeights is necessary for KD-tree point location structures to work,
for FMM methods to work, etc.
minimum/maximum can be used by grid-based methods (optional for FMM, FMM*).
"""
def __init__(self):
self.cspace = None
self.feasibilityTests = None
self.feasibilityTestNames = None
self.feasibilityTestDependencies = None
self.eps = 1e-3
self.bound = [(0,1)]
self.properties = {}
def setBounds(self,bound):
"""Convenience function: sets the sampling bound and the
space properties in one line."""
self.bound = bound
self.properties["minimum"] = [b[0] for b in bound]
self.properties["maximum"] = [b[1] for b in bound]
volume = 1
for b in self.bound:
if b[0] != b[1]: volume *= b[1]-b[0]
self.properties['volume'] = volume
def close(self):
"""This method must be called to free the memory associated with the
planner. Alternatively, motionplanning.destroy() can be called to
free all previously constructed CSpace and MotionPlan objects."""
if self.cspace is not None:
self.cspace.destroy()
self.cspace = None
def setup(self,reinit = False):
"""Called internally by the MotionPlan class to set up planning
hooks.
If reinit is not set to True, and the setup() method has been called before,
a warning message will be printed. Set it to True to suppress this message."""
if self.cspace is not None:
if not reinit:
print "CSpace.setup(): Performance warning, called twice, destroying previous CSpaceInterface object"
self.cspace.destroy()
self.cspace = motionplanning.CSpaceInterface()
if self.feasibilityTests is not None:
for n,f in zip(self.feasibilityTestNames,self.feasibilityTests):
self.cspace.addFeasibilityTest(n,f)
self.cspace.enableAdaptiveQueries()
for (n,d) in self.feasibilityTestDependencies:
self.cspace.setFeasibilityDependency(n,d)
else:
if hasattr(self,'feasible'):
self.cspace.setFeasibility(getattr(self,'feasible'))
else:
                raise NotImplementedError('Need a feasible method or addFeasibilityTest calls')
if hasattr(self,'visible'):
self.cspace.setVisibility(getattr(self,'visible'))
else:
self.cspace.setVisibilityEpsilon(self.eps)
if hasattr(self,'sample'):
self.cspace.setSampler(getattr(self,'sample'))
else:
            raise NotImplementedError('Need sample method')
if hasattr(self,'sampleneighborhood'):
self.cspace.setNeighborhoodSampler(getattr(self,'sampleneighborhood'))
if hasattr(self,'distance'):
self.cspace.setDistance(getattr(self,'distance'))
if hasattr(self,'interpolate'):
self.cspace.setInterpolate(getattr(self,'interpolate'))
for (k,v) in self.properties.iteritems():
if isinstance(v,(list,tuple)):
self.cspace.setProperty(k," ".join([str(item) for item in v]))
else:
self.cspace.setProperty(k,str(v))
def sample(self):
"""Overload this to define a nonuniform sampler.
By default, it will sample from the axis-aligned bounding box
defined by self.bound. To define a different domain, set self.bound
to the desired bound.
"""
return [random.uniform(*b) for b in self.bound]
def sampleneighborhood(self,c,r):
"""Overload this to define a nonuniform sampler.
By default, it will sample from the axis-aligned box of radius r
around c, but clamped to the bound.
"""
return [random.uniform(max(b[0],ci-r),min(b[1],ci+r)) for ci,b in zip(c,self.bound)]
def addFeasibilityTest(self,func,name=None,dependencies=None):
"""Adds a new feasibility test with the given function func(x) and the specified name.
If name is not provided (default) a default name is generated.
If dependencies is provided, it can be a string or a list of strings,
indicating that this test must be called after some other test(s).
"""
if self.feasibilityTests is None:
self.feasibilityTests = []
self.feasibilityTestNames = []
self.feasibilityTestDependencies = []
assert name is None or isinstance(name,str),"Name argument 'name' must be a string"
assert callable(func),"Feasibility test 'func' must be a callable object"
self.feasibilityTests.append(func)
if name is None:
name = "test_"+str(len(self.feasibilityTests)-1)
self.feasibilityTestNames.append(name)
if dependencies is not None:
if isinstance(dependencies,(list,tuple)):
for d in dependencies:
self.feasibilityTestDependencies.append((name,d))
else:
self.feasibilityTestDependencies.append((name,dependencies))
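    # Illustrative call (added note, not part of the original module): a test that should only
    # run after a cheaper test can declare that ordering, e.g.
    #   space.addFeasibilityTest(collision_free, name="collision", dependencies="bounds")
    # where `collision_free` and both test names are placeholder assumptions.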
def inBounds(self,x):
"""Returns true if x is within the given bounds"""
return all(a<=xi<=b for (xi,(a,b)) in zip(x,self.bound))
def feasible(self,x):
"""Overload this to define your new feasibility test.
By default the implementation simply tests the bounds constraint, or if self.feasibilityTests
is not empty, tests each function in self.feasibilityTests."""
if self.feasibilityTests is None:
return self.inBounds(x)
else:
for test in self.feasibilityTests:
if not test(x): return False
return True
def isFeasible(self,x):
"""An overload for self.cspace.isFeasible. Use this to test feasibility of a configuration
(rather than feasible()) if you wish to take advantage of adaptive feasibility testing and
constraint testing statistics."""
return self.cspace.isFeasible(x)
def isVisible(self,x,y):
"""An overload for self.cspace.isVisible. Use this to test visibility of a line
(rather than visible()) if you want to use the natural visibility tester, wish to take
advantage of adaptive visibility testing, or want to use constraint testing statistics."""
return self.cspace.isVisible(x,y)
def getStats(self):
"""Returns a dictionary mapping statistic names to values. Result contains
fraction of feasible configurations, edges, etc. If feasibility tests are
individually specified, returns stats for individual tests as well. """
if self.cspace is None: return {}
return self.cspace.getStats()
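# Illustrative subclass (added for documentation; not part of the original module): a minimal
# CSpace with one circular obstacle, showing the override points described in the CSpace
# docstring. The bounds, obstacle center/radius and test name are assumptions, and nothing
# in this file instantiates it.
class _ExampleCircleCSpace(CSpace):
    def __init__(self):
        CSpace.__init__(self)
        self.setBounds([(0.0, 1.0), (0.0, 1.0)])
        self.eps = 1e-3
        self.addFeasibilityTest(lambda q: (q[0] - 0.5) ** 2 + (q[1] - 0.5) ** 2 > 0.25 ** 2,
                                name="outside_circle")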
class MotionPlan:
"""A motion planner instantiated on a space. Currently supports
only kinematic, point-to-point plans.
Multi-query roadmaps are supported for the PRM and SBLPRT algorithms.
In multi-query mode, you may call addMilestone(q) to add a new milestone.
addMilestone() returns the milestone's index, which can be used
in later calls to getPath().
Planner parameters must be set by calling the static
MotionPlan.setOptions(param1=value1,param2=value2,...) method BEFORE
calling the MotionPlan(space,type) constructor.
If type is not specified in the constructor, the planning algorithm
will be chosen by default.
Note that MotionPlan.close() or motionplanning.destroy() must be called
to free memory after you are done.
"""
def __init__(self,space,type=None,**options):
"""Initializes a plan with a given CSpace and a given type.
Optionally, planner options can be set via keyword arguments.
Valid values for type are:
- prm: the Probabilistic Roadmap algorithm
- rrt: the Rapidly Exploring Random Trees algorithm
- sbl: the Single-Query Bidirectional Lazy planner
- sblprt: the probabilistic roadmap of trees (PRT) algorithm with SBL as the inter-root planner.
- rrt*: the RRT* algorithm for optimal motion planning
- prm*: the PRM* algorithm for optimal motion planning
- lazyprm*: the Lazy-PRM* algorithm for optimal motion planning
- lazyrrg*: the Lazy-RRG* algorithm for optimal motion planning
- fmm: the fast marching method algorithm for resolution-complete optimal motion planning
- fmm*: an anytime fast marching method algorithm for optimal motion planning
(this list may be out-of-date; the most current documentation
is listed in src/motionplanning.h)
"""
if space.cspace is None:
space.setup()
if type != None:
motionplanning.setPlanType(type)
if len(options) > 0:
MotionPlan.setOptions(**options)
self.space = space
self.planner = motionplanning.PlannerInterface(space.cspace)
def close(self):
"""This method must be called to free the memory associated with the
planner. Alternatively, motionplanning.destroy() can be called to
free all previously constructed CSpace and MotionPlan objects."""
self.planner.destroy()
@staticmethod
def setOptions(**opts):
"""Sets a numeric or string-valued setting for the next constructed
planner. Arguments are specified by key-value pair.
Valid keys are:
- "knn": k value for the k-nearest neighbor connection strategy
(used in PRM)
- "connectionThreshold": a milestone connection threshold
- "perturbationRadius": (for RRT and SBL)
- "bidirectional": 1 if bidirectional planning is requested (used
in RRT)
- "grid": 1 if a point selection grid should be used (used in SBL)
- "gridResolution": resolution for the grid, if the grid should
be used (used in SBL with grid, FMM, FMM*)
- "suboptimalityFactor": allowable suboptimality (used in RRT*,
lazy PRM*, lazy RRG*)
- "randomizeFrequency": a grid randomization frequency (for SBL)
- "shortcut": nonzero if you wish to perform shortcutting to
improve a path after a first plan is found.
- "restart": nonzero if you wish to restart the planner to
get progressively better paths with the remaining time.
- "pointLocation": a string designating a point location data
structure. "kdtree" is supported, optionally followed by a
weight vector (used in PRM, RRT*, PRM*, LazyPRM*, LazyRRG*)
- "restartTermCond": used if the "restart" setting is true.
This is a JSON string defining the termination condition
(default value:
"{foundSolution:1;maxIters:1000}")
(this list may be out-of-date; the most current documentation
is listed in motionplanning.h)
"""
for (a,b) in opts.items():
if a=='type':
motionplanning.setPlanType(str(b))
elif isinstance(b,str):
motionplanning.setPlanSetting(a,b)
else:
motionplanning.setPlanSetting(a,float(b))
def setEndpoints(self,start,goal):
"""Sets the start and goal configuration. goal can also be a
*goal test*, which is a function taking one argument f(q) that
returns true if the configuration is at the goal and false
otherwise. Another representation of a goal test is a pair
(f,s) where f() tests whether the configuration is at the goal,
and s() generates a new sample at the goal."""
if hasattr(goal,'__call__'):
self.planner.setEndpointSet(start,goal)
elif(len(goal) == 2 and hasattr(goal[0],'__call__')):
self.planner.setEndpointSet(start,goal[0],goal[1])
else:
self.planner.setEndpoints(start,goal)
def addMilestone(self,x):
"""Manually adds a milestone and returns its index"""
return self.planner.addMilestone(x);
def planMore(self,iterations):
"""Performs a given number of iterations of planning."""
self.planner.planMore(iterations)
def getPath(self,milestone1=None,milestone2=None):
"""Returns the path between the two milestones. If no
arguments are provided, this returns the path between the
start and goal.
The path is a list of configurations (each configuration is a Python
list)."""
if milestone1==None and milestone2==None:
return self.planner.getPathEndpoints();
else:
return self.planner.getPath(milestone1,milestone2)
def getRoadmap(self):
"""Returns a graph (V,E) containing the planner's roadmap.
V is a list of configurations (each configuration is a Python list)
and E is a list of edges (each edge is a pair (i,j) indexing into V).
"""
return self.planner.getRoadmap()
def getStats(self):
"""Returns a dictionary mapping statistic names to values. Result is
planner-dependent """
return self.planner.getStats()
def _selfTest():
c = CSpace()
c.bound = [(-2,2),(-2,2)]
c.feasible = lambda x: pow(x[0],2.0)+pow(x[1],2.0) > 1.0
c.setup()
MotionPlan.setOptions(type="rrt")
print "Setup complete"
p = MotionPlan(c)
print "Setting endpoints"
p.setEndpoints([-1.5,0],[1.5,0])
print "PlanMore"
p.planMore(100)
print "GetPath"
path = p.getPath()
print "Resulting path:"
print path
p.close()
c.close()
if __name__=="__main__":
_selfTest() | bsd-3-clause | 8,012,731,611,532,133,000 | 44.446524 | 117 | 0.648623 | false |
uclouvain/osis_louvain | base/templatetags/dictionnary.py | 1 | 1419 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django import template
register = template.Library()
@register.filter
def get_item(dictionary, key):
return dictionary.get(key)
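# Illustrative template usage (added note, not in the original file):
#   {{ some_dict|get_item:some_key }}
# where `some_dict` and `some_key` are placeholder template variables.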
| agpl-3.0 | -5,328,089,672,373,702,000 | 41.969697 | 87 | 0.654443 | false |
BuildmLearn/University-Campus-Portal-UCP | docs/conf.py | 1 | 9863 | # -*- coding: utf-8 -*-
#
# UCP documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 22 18:19:29 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../UCP'))
import django
os.environ['DJANGO_SETTINGS_MODULE'] = 'UCP.settings'
django.setup()
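# Added note (not in the original conf.py): putting the project on sys.path and calling
# django.setup() lets sphinx.ext.autodoc import Django models without raising
# ImproperlyConfigured when the docs are built outside manage.py.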
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'UCP'
copyright = u'2016, Pranav Tiwari'
author = u'Pranav Tiwari'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'UCP v1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'UCPdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'UCP.tex', u'UCP Documentation',
u'Pranav Tiwari', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ucp', u'UCP Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'UCP', u'UCP Documentation',
author, 'UCP', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| bsd-3-clause | 3,817,037,162,446,109,000 | 27.50578 | 80 | 0.690662 | false |
KiviMao/kivi | Script/Show-Comments-story/Get-All-User.py | 1 | 1268 | #!/usr/bin/python3
import MySQLdb
import os
import re
db = MySQLdb.connect("etos39.cn.ao.ericsson.se","automation","automation","gerrit_data_new")
# db = MySQLdb.connect("localhost","root","root","work" )
cursor = db.cursor()
cursor.execute('SELECT reviewer_username FROM comments GROUP BY reviewer_username')
usersList = cursor.fetchall()
UM = {}
for users in usersList:
for user in users:
if user != None:
outPut = os.popen('/usr/bin/ldapsearch -x -LLL -D "uid=COVESEOS,ou=Users,ou=Internal,o=ericsson" -w 1qaz\@WSX -b "uid='+user+',ou=Users,ou=Internal,o=ericsson" -h ecd.ericsson.se -p 389|grep eriOperationalManager:|awk \'{print $2}\'','r')
if outPut != None:
try:
param = []
param=(str(user),str(outPut.read()))
                    rule = re.compile(r'[^a-zA-Z]')
username = rule.sub('',str(user))
managername = rule.sub('',param[1])
print(username)
cursor.execute("""INSERT INTO person(username,manager)VALUES(%s,%s)""",(username,managername))
db.commit()
except Exception as e:
                print(e)
db.rollback() | apache-2.0 | -2,548,353,246,092,161,500 | 36.323529 | 251 | 0.552839 | false |
mgaillard/CNNFeaturesRobustness | features_extractor/main.py | 1 | 9880 | """
A program that extracts features from images using a CNN.
"""
import argparse
import sys
from os.path import isdir
import numpy as np
from image_features import FeatureExtractor, FeaturesNpzIO, FeaturesHdf5IO
def main():
# parse program arguments
parser = argparse.ArgumentParser()
parser.add_argument('directory',
type=str,
help='Which directory to process')
parser.add_argument('--output',
type=str,
help='In which file to save the features',
default='dataset')
parser.add_argument('--model',
type=str,
choices=['InceptionV3_predictions',
'InceptionV3_mixed10_avg',
'InceptionV3_mixed10_max',
'InceptionV3_mixed9_avg',
'InceptionV3_mixed9_max',
'InceptionV3_mixed9_max_pca_64',
'InceptionV3_mixed8_avg',
'InceptionV3_mixed8_max',
'InceptionV3_mixed7_avg',
'InceptionV3_mixed7_max',
'InceptionV3_mixed6_avg',
'InceptionV3_mixed6_max',
'InceptionV3_mixed5_avg',
'InceptionV3_mixed5_max',
'InceptionV3_mixed4_avg',
'InceptionV3_mixed4_max',
'InceptionV3_mixed3_avg',
'InceptionV3_mixed3_max',
'InceptionV3_mixed2_avg',
'InceptionV3_mixed2_max',
'InceptionV3_mixed1_avg',
'InceptionV3_mixed1_max',
'InceptionV3_mixed0_avg',
'InceptionV3_mixed0_max',
'Xception_predictions',
'Xception_block14_sepconv2_act_avg',
'Xception_block14_sepconv2_act_max',
'Xception_block14_sepconv1_act_avg',
'Xception_block14_sepconv1_act_max',
'Xception_add_12_avg',
'Xception_add_12_max',
'Xception_add_11_avg',
'Xception_add_11_max',
'Xception_add_10_avg',
'Xception_add_10_max',
'Xception_add_9_avg',
'Xception_add_9_max',
'Xception_add_8_avg',
'Xception_add_8_max',
'Xception_add_7_avg',
'Xception_add_7_max',
'Xception_add_6_avg',
'Xception_add_6_max',
'Xception_add_5_avg',
'Xception_add_5_max',
'Xception_add_4_avg',
'Xception_add_4_max',
'Xception_add_3_avg',
'Xception_add_3_max',
'Xception_add_2_avg',
'Xception_add_2_max',
'Xception_add_1_avg',
'Xception_add_1_max',
'ResNet50_predictions',
'ResNet50_flatten_1',
'ResNet50_flatten_1_norm_l2',
'ResNet50_avg_pool_avg',
'ResNet50_avg_pool_avg_norm_l2',
'ResNet50_avg_pool_max',
'ResNet50_avg_pool_max_norm_l2',
'ResNet50_activation_46_avg',
'ResNet50_activation_46_max',
'ResNet50_activation_46_max_pca_64',
'ResNet50_activation_43_avg',
'ResNet50_activation_43_max',
'ResNet50_activation_43_max_pca_64',
'ResNet50_activation_40_avg',
'ResNet50_activation_40_max',
'ResNet50_activation_37_avg',
'ResNet50_activation_37_max',
'ResNet50_activation_34_avg',
'ResNet50_activation_34_max',
'ResNet50_activation_31_avg',
'ResNet50_activation_31_max',
'ResNet50_activation_28_avg',
'ResNet50_activation_28_max',
'ResNet50_activation_22_avg',
'ResNet50_activation_22_max',
'ResNet50_activation_19_avg',
'ResNet50_activation_19_max',
'ResNet50_activation_16_avg',
'ResNet50_activation_16_max',
'ResNet50_activation_13_avg',
'ResNet50_activation_13_max',
'ResNet50_activation_10_avg',
'ResNet50_activation_10_max',
'ResNet50_activation_7_avg',
'ResNet50_activation_7_max',
'ResNet50_activation_4_avg',
'ResNet50_activation_4_max',
'VGG16_predictions',
'VGG16_fc2',
'VGG16_fc2_norm_l2',
'VGG16_fc1',
'VGG16_fc1_norm_l2',
'VGG16_flatten',
'VGG16_flatten_norm_l2',
'VGG16_block5_pool_avg',
'VGG16_block5_pool_avg_norm_l2',
'VGG16_block5_pool_max',
'VGG16_block5_pool_max_norm_l2',
'VGG16_block5_pool_max_pca_64',
'VGG16_block5_pool_max_pca_64_norm_l2',
'VGG16_block4_pool_avg',
'VGG16_block4_pool_avg_norm_l2',
'VGG16_block4_pool_max',
'VGG16_block4_pool_max_norm_l2',
'VGG16_block3_pool_avg',
'VGG16_block3_pool_avg_norm_l2',
'VGG16_block3_pool_max',
'VGG16_block3_pool_max_norm_l2',
'VGG19_predictions',
'VGG19_fc2',
'VGG19_fc2_norm_l2',
'VGG19_fc1',
'VGG19_fc1_norm_l2',
'VGG19_flatten',
'VGG19_flatten_norm_l2',
'VGG19_block5_pool_avg',
'VGG19_block5_pool_avg_norm_l2',
'VGG19_block5_pool_max',
'VGG19_block5_pool_max_norm_l2',
'VGG19_block5_pool_max_pca_64',
'VGG19_block5_pool_max_pca_64_norm_l2',
'VGG19_block4_pool_avg',
'VGG19_block4_pool_avg_norm_l2',
'VGG19_block4_pool_max',
'VGG19_block4_pool_max_norm_l2',
'VGG19_block3_pool_avg',
'VGG19_block3_pool_avg_norm_l2',
'VGG19_block3_pool_max',
'VGG19_block3_pool_max_norm_l2'],
help='Which model to use to extract features',
default='VGG16_block5_pool_avg')
parser.add_argument('--format',
type=str,
choices=['npz', 'h5'],
help='In which format to save the features',
default='npz')
args = parser.parse_args()
if not isdir(args.directory):
print('The provided directory doesn\'t exist.')
sys.exit()
# Extract features
extractor = FeatureExtractor(args.model)
features = extractor.extract(args.directory)
# Display information about the features
print('Number of images: {}'.format(features.features.shape[0]))
print('Features shape: {}'.format(features.features.shape))
print('Mean of the features of the first image: {}'.format(np.mean(features.features[0])))
print('L2 norm of the features of the first image: {}'.format(np.linalg.norm(features.features[0], 2)))
print('Features of the first image:\n{}'.format(features.features[0]))
# Save the features
if args.format == 'npz':
FeaturesNpzIO.save(args.output, features)
elif args.format == 'h5':
FeaturesHdf5IO.save(args.output, features)
if __name__ == "__main__":
main()
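# Illustrative invocation (added note; the image path is an assumption):
#   python main.py /path/to/images --model VGG16_block5_pool_avg --format h5 --output features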
| gpl-3.0 | 3,966,462,580,146,087,400 | 49.661538 | 107 | 0.382731 | false |
Hoikas/korman | korman/ui/ui_toolbox.py | 1 | 1445 | # This file is part of Korman.
#
# Korman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Korman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Korman. If not, see <http://www.gnu.org/licenses/>.
import bpy
class ToolboxPanel:
bl_category = "Tools"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
@classmethod
def poll(cls, context):
return context.object and context.scene.render.engine == "PLASMA_GAME"
class PlasmaToolboxPanel(ToolboxPanel, bpy.types.Panel):
bl_context = "objectmode"
bl_label = "Plasma"
def draw(self, context):
layout = self.layout
col = layout.column(align=True)
col.label("Enable All:")
col.operator("object.plasma_enable_all_objects", icon="OBJECT_DATA")
col.operator("texture.plasma_enable_all_textures", icon="TEXTURE")
col.label("Convert All:")
col.operator("texture.plasma_convert_layer_opacities", icon="IMAGE_RGB_ALPHA")
| gpl-3.0 | 3,638,843,941,275,409,000 | 34.243902 | 86 | 0.686505 | false |
monikagrabowska/osf.io | addons/base/models.py | 1 | 30775 | import abc
import os
import time
import markupsafe
import requests
from django.db import models
from framework.auth import Auth
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError, PermissionsError
from mako.lookup import TemplateLookup
from osf.models.base import BaseModel, ObjectIDMixin
from osf.models.external import ExternalAccount
from osf.models.node import AbstractNode
from osf.models.user import OSFUser
from osf.modm_compat import Q
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
from website import settings
from addons.base import logger, serializer
from website.oauth.signals import oauth_complete
from website.util import waterbutler_url_for
lookup = TemplateLookup(
directories=[
settings.TEMPLATES_PATH
],
default_filters=[
'unicode', # default filter; must set explicitly when overriding
# FIXME: Temporary workaround for data stored in wrong format in DB. Unescape it before it
# gets re-escaped by Markupsafe. See [#OSF-4432]
'temp_ampersand_fixer',
'h',
],
imports=[
# FIXME: Temporary workaround for data stored in wrong format in DB. Unescape it before it
# gets re-escaped by Markupsafe. See [#OSF-4432]
'from website.util.sanitize import temp_ampersand_fixer',
]
)
class BaseAddonSettings(ObjectIDMixin, BaseModel):
deleted = models.BooleanField(default=False)
class Meta:
abstract = True
@property
def config(self):
return self._meta.app_config
@property
def short_name(self):
return self.config.short_name
def delete(self, save=True):
self.deleted = True
self.on_delete()
if save:
self.save()
def undelete(self, save=True):
self.deleted = False
self.on_add()
if save:
self.save()
def to_json(self, user):
return {
'addon_short_name': self.config.short_name,
'addon_full_name': self.config.full_name,
}
#############
# Callbacks #
#############
def on_add(self):
"""Called when the addon is added (or re-added) to the owner (User or Node)."""
pass
def on_delete(self):
"""Called when the addon is deleted from the owner (User or Node)."""
pass
class BaseUserSettings(BaseAddonSettings):
owner = models.OneToOneField(OSFUser, blank=True, null=True, related_name='%(app_label)s_user_settings')
class Meta:
abstract = True
@property
def public_id(self):
return None
@property
def has_auth(self):
"""Whether the user has added credentials for this addon."""
return False
# TODO: Test me @asmacdo
@property
def nodes_authorized(self):
"""Get authorized, non-deleted nodes. Returns an empty list if the
attached add-on does not include a node model.
"""
model = self.config.node_settings
if not model:
return []
return [obj.owner for obj in model.objects.filter(user_settings=self, owner__is_deleted=False).select_related('owner')]
@property
def can_be_merged(self):
return hasattr(self, 'merge')
def to_json(self, user):
ret = super(BaseUserSettings, self).to_json(user)
ret['has_auth'] = self.has_auth
ret.update({
'nodes': [
{
'_id': node._id,
'url': node.url,
'title': node.title,
'registered': node.is_registration,
'api_url': node.api_url
}
for node in self.nodes_authorized
]
})
return ret
def __repr__(self):
if self.owner:
return '<{cls} owned by user {uid}>'.format(cls=self.__class__.__name__, uid=self.owner._id)
return '<{cls} with no owner>'.format(cls=self.__class__.__name__)
@oauth_complete.connect
def oauth_complete(provider, account, user):
if not user or not account:
return
user.add_addon(account.provider)
user.save()
class BaseOAuthUserSettings(BaseUserSettings):
# Keeps track of what nodes have been given permission to use external
# accounts belonging to the user.
oauth_grants = DateTimeAwareJSONField(default=dict, blank=True)
# example:
# {
# '<Node._id>': {
# '<ExternalAccount._id>': {
# <metadata>
# },
# }
# }
#
# metadata here is the specific to each addon.
# The existence of this property is used to determine whether or not
# an addon instance is an "OAuth addon" in
# AddonModelMixin.get_oauth_addons().
oauth_provider = None
serializer = serializer.OAuthAddonSerializer
class Meta:
abstract = True
@property
def has_auth(self):
return bool(self.external_accounts)
@property
def external_accounts(self):
"""The user's list of ``ExternalAccount`` instances for this provider"""
return self.owner.external_accounts.filter(provider=self.oauth_provider.short_name)
def delete(self, save=True):
for account in self.external_accounts.filter(provider=self.config.short_name):
self.revoke_oauth_access(account, save=False)
super(BaseOAuthUserSettings, self).delete(save=save)
def grant_oauth_access(self, node, external_account, metadata=None):
"""Give a node permission to use an ``ExternalAccount`` instance."""
# ensure the user owns the external_account
if not self.owner.external_accounts.filter(id=external_account.id).exists():
raise PermissionsError()
metadata = metadata or {}
# create an entry for the node, if necessary
if node._id not in self.oauth_grants:
self.oauth_grants[node._id] = {}
# create an entry for the external account on the node, if necessary
if external_account._id not in self.oauth_grants[node._id]:
self.oauth_grants[node._id][external_account._id] = {}
# update the metadata with the supplied values
for key, value in metadata.iteritems():
self.oauth_grants[node._id][external_account._id][key] = value
self.save()
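    # Illustrative flow (added note, not part of the original module; the 'folder' key is an
    # assumption specific to a hypothetical addon):
    #   user_settings.grant_oauth_access(node, account, metadata={'folder': 'abc123'})
    #   user_settings.verify_oauth_access(node, account, metadata={'folder': 'abc123'})  # -> True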
@must_be_logged_in
def revoke_oauth_access(self, external_account, auth, save=True):
"""Revoke all access to an ``ExternalAccount``.
TODO: This should accept node and metadata params in the future, to
allow fine-grained revocation of grants. That's not yet been needed,
so it's not yet been implemented.
"""
for node in self.get_nodes_with_oauth_grants(external_account):
try:
addon_settings = node.get_addon(external_account.provider, deleted=True).deauthorize(auth=auth)
except AttributeError:
# No associated addon settings despite oauth grant
pass
if external_account.osfuser_set.count() == 1 and \
external_account.osfuser_set.filter(osfuser=auth.user).count() == 1:
# Only this user is using the account, so revoke remote access as well.
self.revoke_remote_oauth_access(external_account)
for key in self.oauth_grants:
self.oauth_grants[key].pop(external_account._id, None)
if save:
self.save()
def revoke_remote_oauth_access(self, external_account):
""" Makes outgoing request to remove the remote oauth grant
stored by third-party provider.
Individual addons must override this method, as it is addon-specific behavior.
Not all addon providers support this through their API, but those that do
should also handle the case where this is called with an external_account
with invalid credentials, to prevent a user from being unable to disconnect
an account.
"""
pass
def verify_oauth_access(self, node, external_account, metadata=None):
"""Verify that access has been previously granted.
If metadata is not provided, this checks only if the node can access the
account. This is suitable to check to see if the node's addon settings
is still connected to an external account (i.e., the user hasn't revoked
it in their user settings pane).
If metadata is provided, this checks to see that all key/value pairs
have been granted. This is suitable for checking access to a particular
folder or other resource on an external provider.
"""
metadata = metadata or {}
# ensure the grant exists
try:
grants = self.oauth_grants[node._id][external_account._id]
except KeyError:
return False
# Verify every key/value pair is in the grants dict
for key, value in metadata.iteritems():
if key not in grants or grants[key] != value:
return False
return True
def get_nodes_with_oauth_grants(self, external_account):
# Generator of nodes which have grants for this external account
for node_id, grants in self.oauth_grants.iteritems():
node = AbstractNode.load(node_id)
if external_account._id in grants.keys() and not node.is_deleted:
yield node
def get_attached_nodes(self, external_account):
for node in self.get_nodes_with_oauth_grants(external_account):
if node is None:
continue
node_settings = node.get_addon(self.oauth_provider.short_name)
if node_settings is None:
continue
if node_settings.external_account == external_account:
yield node
def merge(self, user_settings):
"""Merge `user_settings` into this instance"""
if user_settings.__class__ is not self.__class__:
raise TypeError('Cannot merge different addons')
for node_id, data in user_settings.oauth_grants.iteritems():
if node_id not in self.oauth_grants:
self.oauth_grants[node_id] = data
else:
node_grants = user_settings.oauth_grants[node_id].iteritems()
for ext_acct, meta in node_grants:
if ext_acct not in self.oauth_grants[node_id]:
self.oauth_grants[node_id][ext_acct] = meta
else:
                        for k, v in meta.iteritems():
if k not in self.oauth_grants[node_id][ext_acct]:
self.oauth_grants[node_id][ext_acct][k] = v
user_settings.oauth_grants = {}
user_settings.save()
try:
config = settings.ADDONS_AVAILABLE_DICT[
self.oauth_provider.short_name
]
Model = config.models['nodesettings']
except KeyError:
pass
else:
connected = Model.find(Q('user_settings', 'eq', user_settings))
for node_settings in connected:
node_settings.user_settings = self
node_settings.save()
self.save()
def to_json(self, user):
ret = super(BaseOAuthUserSettings, self).to_json(user)
ret['accounts'] = self.serializer(
user_settings=self
).serialized_accounts
return ret
#############
# Callbacks #
#############
def on_delete(self):
"""When the user deactivates the addon, clear auth for connected nodes.
"""
super(BaseOAuthUserSettings, self).on_delete()
nodes = [AbstractNode.load(node_id) for node_id in self.oauth_grants.keys()]
for node in nodes:
node_addon = node.get_addon(self.oauth_provider.short_name)
if node_addon and node_addon.user_settings == self:
node_addon.clear_auth()
class BaseNodeSettings(BaseAddonSettings):
owner = models.OneToOneField(AbstractNode, null=True, blank=True, related_name='%(app_label)s_node_settings')
class Meta:
abstract = True
@property
def complete(self):
"""Whether or not this addon is properly configured
:rtype bool:
"""
raise NotImplementedError()
@property
def configured(self):
"""Whether or not this addon has had a folder connected.
:rtype bool:
"""
return self.complete
@property
def has_auth(self):
"""Whether the node has added credentials for this addon."""
return False
def to_json(self, user):
ret = super(BaseNodeSettings, self).to_json(user)
ret.update({
'user': {
'permissions': self.owner.get_permissions(user)
},
'node': {
'id': self.owner._id,
'api_url': self.owner.api_url,
'url': self.owner.url,
'is_registration': self.owner.is_registration,
},
'node_settings_template': os.path.basename(self.config.node_settings_template),
})
return ret
def render_config_error(self, data):
"""
"""
# Note: `config` is added to `self` in `AddonConfig::__init__`.
template = lookup.get_template('project/addon/config_error.mako')
return template.get_def('config_error').render(
title=self.config.full_name,
name=self.config.short_name,
**data
)
#############
# Callbacks #
#############
def before_page_load(self, node, user):
"""
:param User user:
:param Node node:
"""
pass
def before_remove_contributor(self, node, removed):
"""
:param Node node:
:param User removed:
"""
pass
def after_remove_contributor(self, node, removed, auth=None):
"""
:param Node node:
:param User removed:
"""
pass
def before_make_public(self, node):
"""
:param Node node:
:returns: Alert message or None
"""
pass
def before_make_private(self, node):
"""
:param Node node:
:returns: Alert message or None
"""
pass
def after_set_privacy(self, node, permissions):
"""
:param Node node:
:param str permissions:
"""
pass
def before_fork(self, node, user):
"""Return warning text to display if user auth will be copied to a
fork.
:param Node node:
        :param User user:
:returns Alert message
"""
if hasattr(self, 'user_settings'):
if self.user_settings is None:
return (
u'Because you have not configured the authorization for this {addon} add-on, this '
u'{category} will not transfer your authentication to '
u'the forked {category}.'
).format(
addon=self.config.full_name,
category=node.project_or_component,
)
elif self.user_settings and self.user_settings.owner == user:
return (
u'Because you have authorized the {addon} add-on for this '
u'{category}, forking it will also transfer your authentication to '
u'the forked {category}.'
).format(
addon=self.config.full_name,
category=node.project_or_component,
)
else:
return (
u'Because the {addon} add-on has been authorized by a different '
u'user, forking it will not transfer authentication to the forked '
u'{category}.'
).format(
addon=self.config.full_name,
category=node.project_or_component,
)
def after_fork(self, node, fork, user, save=True):
"""
:param Node node:
:param Node fork:
:param User user:
:param bool save:
:returns: Tuple of cloned settings and alert message
"""
clone = self.clone()
clone.user_settings = None
clone.owner = fork
if save:
clone.save()
return clone, None
def before_register(self, node, user):
"""
:param Node node:
:param User user:
:returns: Alert message
"""
pass
def after_register(self, node, registration, user, save=True):
"""
:param Node node:
:param Node registration:
:param User user:
:param bool save:
:returns: Tuple of cloned settings and alert message
"""
return None, None
def after_delete(self, node, user):
"""
:param Node node:
:param User user:
"""
pass
############
# Archiver #
############
class GenericRootNode(object):
path = '/'
name = ''
class BaseStorageAddon(BaseModel):
"""
Mixin class for traversing file trees of addons with files
"""
root_node = GenericRootNode()
class Meta:
abstract = True
@property
def archive_folder_name(self):
name = 'Archive of {addon}'.format(addon=self.config.full_name)
folder_name = getattr(self, 'folder_name', '').lstrip('/').strip()
if folder_name:
name = name + ': {folder}'.format(folder=folder_name)
return name
def _get_fileobj_child_metadata(self, filenode, user, cookie=None, version=None):
kwargs = dict(
provider=self.config.short_name,
path=filenode.get('path', ''),
node=self.owner,
user=user,
view_only=True,
)
if cookie:
kwargs['cookie'] = cookie
if version:
kwargs['version'] = version
metadata_url = waterbutler_url_for('metadata', _internal=True, **kwargs)
res = requests.get(metadata_url)
if res.status_code != 200:
raise HTTPError(res.status_code, data={'error': res.json()})
# TODO: better throttling?
time.sleep(1.0 / 5.0)
return res.json().get('data', [])
def _get_file_tree(self, filenode=None, user=None, cookie=None, version=None):
"""
Recursively get file metadata
"""
filenode = filenode or {
'path': '/',
'kind': 'folder',
'name': self.root_node.name,
}
if filenode.get('kind') == 'file' or 'size' in filenode:
return filenode
kwargs = {
'version': version,
'cookie': cookie,
}
filenode['children'] = [
self._get_file_tree(child, user, cookie=cookie)
for child in self._get_fileobj_child_metadata(filenode, user, **kwargs)
]
return filenode
class BaseOAuthNodeSettings(BaseNodeSettings):
# TODO: Validate this field to be sure it matches the provider's short_name
# NOTE: Do not set this field directly. Use ``set_auth()``
external_account = models.ForeignKey(ExternalAccount, null=True, blank=True,
related_name='%(app_label)s_node_settings')
# NOTE: Do not set this field directly. Use ``set_auth()``
# user_settings = fields.AbstractForeignField()
# The existence of this property is used to determine whether or not
# an addon instance is an "OAuth addon" in
# AddonModelMixin.get_oauth_addons().
oauth_provider = None
class Meta:
abstract = True
@abc.abstractproperty
def folder_id(self):
raise NotImplementedError(
"BaseOAuthNodeSettings subclasses must expose a 'folder_id' property."
)
@abc.abstractproperty
def folder_name(self):
raise NotImplementedError(
"BaseOAuthNodeSettings subclasses must expose a 'folder_name' property."
)
@abc.abstractproperty
def folder_path(self):
raise NotImplementedError(
"BaseOAuthNodeSettings subclasses must expose a 'folder_path' property."
)
@property
def nodelogger(self):
auth = None
if self.user_settings:
auth = Auth(self.user_settings.owner)
self._logger_class = getattr(
self,
'_logger_class',
type(
'{0}NodeLogger'.format(self.config.short_name.capitalize()),
(logger.AddonNodeLogger,),
{'addon_short_name': self.config.short_name}
)
)
return self._logger_class(
node=self.owner,
auth=auth
)
@property
def complete(self):
return bool(
self.has_auth and
self.external_account and
self.user_settings.verify_oauth_access(
node=self.owner,
external_account=self.external_account,
)
)
@property
def configured(self):
return bool(
self.complete and
(self.folder_id or self.folder_name or self.folder_path)
)
@property
def has_auth(self):
"""Instance has an external account and *active* permission to use it"""
return bool(
self.user_settings and self.user_settings.has_auth
) and bool(
self.external_account and self.user_settings.verify_oauth_access(
node=self.owner,
external_account=self.external_account
)
)
def clear_settings(self):
raise NotImplementedError(
"BaseOAuthNodeSettings subclasses must expose a 'clear_settings' method."
)
def set_auth(self, external_account, user, metadata=None, log=True):
"""Connect the node addon to a user's external account.
This method also adds the permission to use the account in the user's
addon settings.
"""
# tell the user's addon settings that this node is connected to it
user_settings = user.get_or_add_addon(self.oauth_provider.short_name)
user_settings.grant_oauth_access(
node=self.owner,
external_account=external_account,
metadata=metadata # metadata can be passed in when forking
)
user_settings.save()
# update this instance
self.user_settings = user_settings
self.external_account = external_account
if log:
self.nodelogger.log(action='node_authorized', save=True)
self.save()
def deauthorize(self, auth=None, add_log=False):
"""Remove authorization from this node.
This method should be overridden for addon-specific behavior,
such as logging and clearing non-generalizable settings.
"""
self.clear_auth()
def clear_auth(self):
"""Disconnect the node settings from the user settings.
This method does not remove the node's permission in the user's addon
settings.
"""
self.external_account = None
self.user_settings = None
self.save()
def before_remove_contributor_message(self, node, removed):
"""If contributor to be removed authorized this addon, warn that removing
will remove addon authorization.
"""
if self.has_auth and self.user_settings.owner == removed:
return (
u'The {addon} add-on for this {category} is authenticated by {name}. '
u'Removing this user will also remove write access to {addon} '
u'unless another contributor re-authenticates the add-on.'
).format(
addon=self.config.full_name,
category=node.project_or_component,
name=removed.fullname,
)
# backwards compatibility
before_remove_contributor = before_remove_contributor_message
def after_remove_contributor(self, node, removed, auth=None):
"""If removed contributor authorized this addon, remove addon authorization
from owner.
"""
if self.user_settings and self.user_settings.owner == removed:
# Delete OAuth tokens
self.user_settings.oauth_grants[self.owner._id].pop(self.external_account._id)
self.user_settings.save()
self.clear_auth()
message = (
u'Because the {addon} add-on for {category} "{title}" was authenticated '
u'by {user}, authentication information has been deleted.'
).format(
addon=self.config.full_name,
category=markupsafe.escape(node.category_display),
title=markupsafe.escape(node.title),
user=markupsafe.escape(removed.fullname)
)
if not auth or auth.user != removed:
url = node.web_url_for('node_setting')
message += (
u' You can re-authenticate on the <u><a href="{url}">Settings</a></u> page.'
).format(url=url)
#
return message
def after_fork(self, node, fork, user, save=True):
"""After forking, copy user settings if the user is the one who authorized
the addon.
:return: A tuple of the form (cloned_settings, message)
"""
clone, _ = super(BaseOAuthNodeSettings, self).after_fork(
node=node,
fork=fork,
user=user,
save=False,
)
if self.has_auth and self.user_settings.owner == user:
metadata = None
if self.complete:
try:
metadata = self.user_settings.oauth_grants[node._id][self.external_account._id]
except (KeyError, AttributeError):
pass
clone.set_auth(self.external_account, user, metadata=metadata, log=False)
message = '{addon} authorization copied to forked {category}.'.format(
addon=self.config.full_name,
category=fork.project_or_component,
)
else:
message = (
u'{addon} authorization not copied to forked {category}. You may '
u'authorize this fork on the <u><a href="{url}">Settings</a></u> '
u'page.'
).format(
addon=self.config.full_name,
url=fork.web_url_for('node_setting'),
category=fork.project_or_component,
)
if save:
clone.save()
return clone, message
def before_register_message(self, node, user):
"""Return warning text to display if user auth will be copied to a
registration.
"""
if self.has_auth:
return (
u'The contents of {addon} add-ons cannot be registered at this time; '
u'the {addon} add-on linked to this {category} will not be included '
u'as part of this registration.'
).format(
addon=self.config.full_name,
category=node.project_or_component,
)
# backwards compatibility
before_register = before_register_message
def serialize_waterbutler_credentials(self):
raise NotImplementedError("BaseOAuthNodeSettings subclasses must implement a \
'serialize_waterbutler_credentials' method.")
def serialize_waterbutler_settings(self):
raise NotImplementedError("BaseOAuthNodeSettings subclasses must implement a \
'serialize_waterbutler_settings' method.")
class BaseCitationsNodeSettings(BaseOAuthNodeSettings):
class Meta:
abstract = True
def serialize_waterbutler_settings(self, *args, **kwargs):
# required by superclass, not actually used
pass
def serialize_waterbutler_credentials(self, *args, **kwargs):
# required by superclass, not actually used
pass
def create_waterbutler_log(self, *args, **kwargs):
# required by superclass, not actually used
pass
@property
def api(self):
"""authenticated ExternalProvider instance"""
if self._api is None:
self._api = self.oauth_provider(account=self.external_account)
return self._api
@property
def complete(self):
"""Boolean indication of addon completeness"""
return bool(self.has_auth and self.user_settings.verify_oauth_access(
node=self.owner,
external_account=self.external_account,
metadata={'folder': self.list_id},
))
@property
def root_folder(self):
"""Serialized representation of root folder"""
return self.serializer.serialized_root_folder
@property
def folder_id(self):
return self.list_id
@property
def folder_name(self):
return self.fetch_folder_name
@property
def folder_path(self):
return self.fetch_folder_name
@property
def fetch_folder_name(self):
"""Returns a displayable folder name"""
if self.list_id is None:
return ''
elif self.list_id == 'ROOT':
return 'All Documents'
else:
return self._fetch_folder_name
def clear_settings(self):
"""Clears selected folder configuration"""
self.list_id = None
def set_auth(self, *args, **kwargs):
"""Connect the node addon to a user's external account.
This method also adds the permission to use the account in the user's
addon settings.
"""
self.list_id = None
self.save()
return super(BaseCitationsNodeSettings, self).set_auth(*args, **kwargs)
def deauthorize(self, auth=None, add_log=True):
"""Remove user authorization from this node and log the event."""
if add_log:
self.owner.add_log(
'{0}_node_deauthorized'.format(self.provider_name),
params={
'project': self.owner.parent_id,
'node': self.owner._id,
},
auth=auth,
)
self.clear_settings()
self.clear_auth()
self.save()
def after_delete(self, node=None, user=None):
self.deauthorize(Auth(user=user), add_log=True)
def on_delete(self):
self.deauthorize(add_log=False)
| apache-2.0 | 1,737,133,200,023,061,200 | 31.225131 | 127 | 0.579951 | false |
soylentdeen/Graffity | src/BCI/fibre_tips.py | 1 | 10191 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#avg 110,429166666667 142,303939393939 39,4660202129861 17,303315566334 358,067727272727 145,667575757576 39,0507475838317 18,0599499387257 607,147575757576 146,828787878788 39,4196129733035 17,4218484639878 854,394393939394 146,564696969697 38,8185123201097 18,0525720203619
import pyfits as fits
import numpy as np
from matplotlib import pyplot as plt
from scipy import ndimage
from astropy.modeling import models, fitting, Fittable2DModel
import argparse
northern_stations=['G2', 'J3', 'J4', 'J5', 'J6']
approx_scale = {'AT' : 80., 'UT' : 18.}
roof_x = np.array([110.4, 358.1, 607.1, 854.4])
roof_y = np.array([142.3, 145.7, 146.8, 146.6])
roof_pos = np.array([38.49, 38.54, 38.76, 39.80])
def mean_rms(data, axis=None, mean_fcn=np.mean):
m = mean_fcn(data, axis=axis)
r = np.sqrt(mean_fcn(np.square(data-m), axis=axis))
return m, r
def format_results(label, data, uncertainties, fmt_str):
out = label
for i in np.arange(len(data)):
out += ' ' + fmt_str.format(data[i], uncertainties[i])
return out
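# Fit a 2D Gaussian to each of the two stellar images expected on either side of
# the roof position (roof_xp, roof_yp), separated by `rho` mas at position angle
# `approx_PAp`, and return the fitted pixel coordinates (x1, y1, x2, y2).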
def fit_port(port, approx_PAp, roof_xp, roof_yp, rho, win, plot, array):
approx_dx=rho*np.sin(approx_PAp*np.pi/180)/approx_scale[array];
approx_dy=rho*np.cos(approx_PAp*np.pi/180)/approx_scale[array];
xmax=int(roof_xp-0.5*approx_dx)
ymax=int(roof_yp-0.5*approx_dy)
thumb=port[ymax-win:ymax+win+1, xmax-win:xmax+win+1]
y, x = np.mgrid[-win:win+1, -win:win+1]
g_init=models.Gaussian2D(amplitude=thumb.max(), x_mean=0, y_mean=0, x_stddev=3., y_stddev=3., theta=0.)
fit_g = fitting.LevMarLSQFitter()
g = fit_g(g_init, x, y, thumb)
if plot >= 2:
plt.figure(figsize=(8, 2.5))
plt.subplot(1, 3, 1)
plt.imshow(thumb, origin='lower', interpolation='nearest', vmin=0, vmax=thumb.max())
plt.title("Data")
plt.subplot(1, 3, 2)
plt.imshow(g(x, y), origin='lower', interpolation='nearest', vmin=0, vmax=thumb.max())
plt.title("Model")
plt.subplot(1, 3, 3)
plt.imshow(thumb - g(x, y), origin='lower', interpolation='nearest', vmin=0, vmax=thumb.max())
plt.title("Residual")
plt.show()
x1 = xmax + g.x_mean
y1 = ymax + g.y_mean
x2max=int(roof_xp+0.5*approx_dx)
y2max=int(roof_yp+0.5*approx_dy)
thumb2=port[y2max-win:y2max+win+1, x2max-win:x2max+win+1]
g2_init=models.Gaussian2D(amplitude=thumb2.max(), x_mean=0, y_mean=0, x_stddev=g.x_stddev, y_stddev=g.y_stddev, theta=0.)
g2_init.x_stddev.fixed=True
g2_init.y_stddev.fixed=True
g2 = fit_g(g2_init, x, y, thumb2)
x2 = x2max+g2.x_mean
y2 = y2max+g2.y_mean
if plot >= 2:
plt.figure(figsize=(8, 2.5))
plt.subplot(1, 3, 1)
plt.imshow(thumb2, origin='lower', interpolation='nearest', vmin=0, vmax=thumb2.max())
plt.title("Data")
plt.subplot(1, 3, 2)
plt.imshow(g2(x, y), origin='lower', interpolation='nearest', vmin=0, vmax=thumb2.max())
plt.title("Model")
plt.subplot(1, 3, 3)
plt.imshow(thumb2 - g2(x, y), origin='lower', interpolation='nearest', vmin=0, vmax=thumb2.max())
plt.title("Residual")
plt.show()
return x1, y1, x2, y2
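# Average the frames in groups of `group`, subtract the dark, and fit the two
# binary components on each of the four 250-pixel-wide detector ports, returning
# arrays of shape (ngroups, 4) with the fitted x/y positions.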
def do_all(data, dark, theta_in, rho_in, win, approx_PA, group, plot, array):
ngroups=int(data.shape[0])/group
if ngroups == 0:
ngroups=1
x1s=np.zeros((ngroups,4))
y1s=np.zeros((ngroups,4))
x2s=np.zeros((ngroups,4))
y2s=np.zeros((ngroups,4))
for g in np.arange(ngroups):
avg_data=np.mean(data[g*group:(g+1)*group, :, :], axis=0)-dark
for p in np.arange(4):
port=avg_data[0:250, p*250:(p+1)*250]
x1, y1, x2, y2 = fit_port(port, approx_PA[p], roof_x[p]-p*250, roof_y[p], rho_in, win, plot, array)
x1s[g, p]=x1+p*250
y1s[g, p]=y1
x2s[g, p]=x2+p*250
y2s[g, p]=y2
return x1s, y1s, x2s, y2s
if (__name__ == "__main__"):
parser = argparse.ArgumentParser(description='Measure star positions on acquisition camera.')
parser.add_argument('fname', nargs='+')
parser.add_argument("-w", "--window", help="full width of window for peak fitting", type=int)
parser.add_argument("-g", "--group", help="number of frames to average together", type=int)
parser.add_argument("-r", "--rho", help="separation of binary (mas)", type=float)
parser.add_argument("-t", "--theta", help="position angle of binary on sky (East of North)", type=float)
parser.add_argument("-d", "--dark_file", help="dark file", action='append')
parser.add_argument("-D", "--dark_outfile", help="file to save preprocessed dark")
parser.add_argument("-p", "--plot", help="what to plot. 0: nothing, 1: final fit results, 2: final results+individual fit residuals", type=int, default=0)
parser.add_argument("-v", "--verbose", help="verbosity level.", type=int, default=0)
args = parser.parse_args()
args.window
verbose=args.verbose
if args.dark_file is None:
dark=0.
else:
dark_file=args.dark_file
infile=fits.open(dark_file.pop())
prihdr=infile[0].header
dark=infile['IMAGING_DATA_ACQ']
darkhdr=dark.header
dark=dark.data
for f in dark_file:
dark=np.append(dark, fits.open(f)['IMAGING_DATA_ACQ'].data, axis=0)
NDIT=dark.shape[0]
dark=np.median(dark, axis=0)
if args.dark_outfile is not None:
pri=fits.PrimaryHDU(header=prihdr)
img=fits.ImageHDU(dark[None,:,:], header=darkhdr)
pri.header.set('HIERARCH ESO DET1 NDIT', NDIT, 'number of DITs accumulated together')
newfits=fits.HDUList([pri, img])
newfits.writeto(args.dark_outfile)
dark=dark[:250,:]
x1s_mean=np.zeros((len(args.fname),4))
x1s_rms=np.zeros((len(args.fname),4))
x2s_mean=np.zeros((len(args.fname),4))
x2s_rms=np.zeros((len(args.fname),4))
y1s_mean=np.zeros((len(args.fname),4))
y1s_rms=np.zeros((len(args.fname),4))
y2s_mean=np.zeros((len(args.fname),4))
y2s_rms=np.zeros((len(args.fname),4))
for f in np.arange(len(args.fname)):
fname=args.fname[f]
hdulist=fits.open(fname)
data=hdulist['IMAGING_DATA_ACQ'].data[:,:250,:]
if args.group is None:
group=data.shape[0]/4
else:
group=args.group
if args.rho is None or args.theta is None:
dx_in=hdulist[0].header["HIERARCH ESO INS SOBJ X"]
dy_in=hdulist[0].header["HIERARCH ESO INS SOBJ Y"]
if args.rho is None:
rho_in=np.sqrt(dx_in*dx_in+dy_in*dy_in)
else:
rho_in=args.rho
enc=np.zeros(4)
approx_PA=np.zeros(4)
sta=dict()
GVPORTS={7: 'GV1', 5: 'GV2', 3: 'GV3', 1:'GV4'}
config=''
array='AT'
for t in ('4', '3', '2', '1'):
port=GVPORTS[hdulist[0].header["HIERARCH ESO ISS CONF INPUT"+t]]
tel=hdulist[0].header["HIERARCH ESO ISS CONF T"+t+"NAME"]
if tel[0:2] == 'UT':
array='UT'
sta[port]=hdulist[0].header["HIERARCH ESO ISS CONF STATION"+t]
config += ' {port}: {tel}/{sta}/{dl}'.format(port=port, tel=tel, sta=sta[port], dl=hdulist[0].header["HIERARCH ESO ISS CONF DL"+t])
if args.theta is None:
theta_in=np.arctan2(dx_in,dy_in)*180./np.pi
else:
theta_in=args.theta
if args.window is None:
win=long(rho_in/2./approx_scale[array]/np.sqrt(2.))
else:
win=long(args.window)/2
# if array == 'UT':
if True:
for p in np.arange(4):
try:
rp=hdulist[0].header["HIERARCH ESO INS DROTOFF"+str(p+1)]
except (KeyError):
rp=roof_pos[p]
approx_PA[p] = 270.-rp
else:
for p in np.arange(4):
enc[p]=hdulist[0].header["HIERARCH ESO INS DROT"+str(p+1)+" ENC"]
approx_PA[p]=theta_in-enc[p]/200.;
if sta['GV'+str(p+1)] in northern_stations:
approx_PA[p] += 180.
if verbose:
print("*******************************************************************************")
print("File name : {}".format(fname))
print("FT target : {}".format(hdulist[0].header["HIERARCH ESO FT ROBJ NAME"]))
print("SC target : {}".format(hdulist[0].header["HIERARCH ESO INS SOBJ NAME"]))
print("Position angle: {}°".format(theta_in))
print("Separation : {} mas".format(rho_in))
print("Configuration :{}".format(config))
hdulist.close()
x1s, y1s, x2s, y2s = do_all(data, dark, theta_in, rho_in, win, approx_PA, group, args.plot, array)
x1s_mean[f,:], x1s_rms[f,:] = mean_rms(x1s, axis=0, mean_fcn=np.median)
y1s_mean[f,:], y1s_rms[f,:] = mean_rms(y1s, axis=0, mean_fcn=np.median)
x2s_mean[f,:], x2s_rms[f,:] = mean_rms(x2s, axis=0, mean_fcn=np.median)
y2s_mean[f,:], y2s_rms[f,:] = mean_rms(y2s, axis=0, mean_fcn=np.median)
if verbose:
print(format_results('x1:', x1s_mean[f,:], x1s_rms[f,:], '{:8.3f}±{:.4f}'))
print(format_results('y1:', y1s_mean[f,:], y1s_rms[f,:], '{:8.3f}±{:.4f}'))
print(format_results('x2:', x2s_mean[f,:], x2s_rms[f,:], '{:8.3f}±{:.4f}'))
print(format_results('y2:', y2s_mean[f,:], y2s_rms[f,:], '{:8.3f}±{:.4f}'))
txt = '{} {:8s} {:8s} '.format(fname,
hdulist[0].header["HIERARCH ESO FT ROBJ NAME"],
hdulist[0].header["HIERARCH ESO INS SOBJ NAME"])
for i in np.arange(4):
txt += '{:8.3f} {:8.3f} {:8.3f} {:8.3f} '.format(x1s_mean[f,i], y1s_mean[f,i], x2s_mean[f,i], y2s_mean[f,i])
print (txt)
| mit | 6,268,545,257,541,182,000 | 38.634241 | 303 | 0.554781 | false |
personalrobotics/dartpy | scripts/grasp_fuze_bottle.py | 1 | 7160 | #!/usr/bin/env python
from __future__ import division, print_function
import dartpy
import numpy
import os.path
WORKSPACE_BASE = '/home/mkoval/storage/dartpy-ws'
ROBOT_POSE = numpy.array([
[ -2.220e-16, 1.000e+00, 0.000e+00, 2.050e+00],
[ -1.000e+00, -2.220e-16, -0.000e+00, 3.050e+00],
[ -0.000e+00, 0.000e+00, 1.000e+00, 0.000e+00],
[ 0.000e+00, 0.000e+00, 0.000e+00, 1.000e+00]])
TABLE_POSE = numpy.array([
[ 1., 0., 0., 2.],
[ 0., 0., -1., 2.],
[ 0., 1., 0., 0.],
[ 0., 0., 0., 1.]])
BOTTLE_POSE = numpy.array([
[ 1. , 0. , 0. , 2. ],
[ 0. , 1. , 0. , 2.228],
[ 0. , 0. , 1. , 0.745],
[ 0. , 0. , 0. , 1. ]])
LEFT_RELAXED_POSITIONS = numpy.array(
[ 0.64, -1.76, 0.26, 1.96, 1.16, 0.87, 1.43])
RIGHT_RELAXED_POSITIONS = numpy.array(
[5.65, -1.76, -0.26, 1.96, -1.15, 0.87, -1.43])
RIGHT_GRASP_POSITIONS = numpy.array(
[4.82 , -0.607, -0.1 , 1.409, -0.031, 0.657, -0.035])
def set_pose(skeleton, pose):
# TODO: We should check that these exist and that the joint is a FreeJoint.
bodynode = skeleton.getRootBodyNode(0)
joint = bodynode.getParentJoint()
joint.setRelativeTransform(pose)
def attach(skeleton, target_bodynode):
if skeleton.getNumTrees() != 1:
raise ValueError('Only one root BodyNode is supported.')
source_bodynode = skeleton.getRootBodyNode(0)
# TODO: Change the type of the root joint to a FixedJoint.
if not source_bodynode.moveTo(target_bodynode):
raise ValueError('Failed moving BodyNode.')
def merge(skeleton, other_skeleton):
critera = dartpy.dynamics.Linkage.Critera()
critera.mStart = None # start at the root
    for itree in xrange(other_skeleton.getNumTrees()):
root_bodynode = other_skeleton.getRootBodyNode(itree)
if not root_bodynode.moveTo(skeleton, None):
raise ValueError(
'Failed moving BodyNode "{:s}" to Skeleton "{:s}".'.format(
root_bodynode.getName(), skeleton.getName()))
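# Compute the world-frame axis-aligned bounding box enclosing all of the
# BodyNode's collision shapes, returned as (min_corner, max_corner).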
def compute_aabb(bodynode):
# TODO: This should be the default argument (#7).
bodynode_pose = bodynode.getTransform(dartpy.dynamics.Frame.World())
min_corner = numpy.array([numpy.PINF] * 3)
max_corner = numpy.array([numpy.NINF] * 3)
# TODO: Replace this with a getCollisionShapes() iterable (#10).
# TODO: This should iterate over ShapeNodes in the future.
for i in xrange(bodynode.getNumCollisionShapes()):
shape = bodynode.getCollisionShape(i)
shape_pose = numpy.dot(bodynode_pose, shape.getLocalTransform())
shape_extents = numpy.zeros(4)
# TODO: This should return a ndarray one-dimensional array (#8).
# TODO: This should use getBoundingBox instead (#9).
shape_extents[0:3] = shape.getBoundingBoxDim().reshape(3)
# TODO: It's awkward to use homogeneous coordinates here.
shape_extents[3] = 1.
        half_extents = 0.5 * shape_extents[0:3]
        # Append 1 so the pose's translation is applied unscaled to each corner.
        shape_min_corner = numpy.dot(shape_pose, numpy.append(-half_extents, 1.))[0:3]
        min_corner = numpy.minimum(min_corner, shape_min_corner)
        shape_max_corner = numpy.dot(shape_pose, numpy.append(half_extents, 1.))[0:3]
        max_corner = numpy.maximum(max_corner, shape_max_corner)
return (min_corner, max_corner)
# TODO: All of this should be replaced by the the CatkinResourceRetriever.
local_retriever = dartpy.common.LocalResourceRetriever()
package_retriever = dartpy.utils.PackageResourceRetriever(local_retriever)
package_retriever.addPackageDirectory('herb_description',
os.path.join(WORKSPACE_BASE, 'devel', 'share', 'herb_description'))
package_retriever.addPackageDirectory('herb_description',
os.path.join(WORKSPACE_BASE, 'src','herb_description'))
package_retriever.addPackageDirectory('pr_ordata',
os.path.join(WORKSPACE_BASE, 'src','pr_ordata'))
urdf_loader = dartpy.utils.DartLoader()
print('Loading the table.')
table = urdf_loader.parseSkeleton(
'package://pr_ordata/data/furniture/table.urdf', package_retriever)
if not table:
raise ValueError('Failed loading table model.')
table_pose = numpy.array([
[1., 0., 0., 2.0],
[0., 0., -1., 2.0],
[0., 1., 0., 0.0],
[0., 0., 0., 1.0]])
set_pose(table, table_pose)
print('Loading a Fuze bottle.')
bottle = urdf_loader.parseSkeleton(
'package://pr_ordata/data/objects/fuze_bottle.urdf', package_retriever)
if not bottle:
raise ValueError('Failed loading bottle model.')
# TODO: This assumes that there is only one BodyNode in the Skeleton.
table_min, table_max = compute_aabb(table.getRootBodyNode(0))
table_center = (table_min + table_max) / 2.
bottle_pose = numpy.eye(4)
bottle_pose[0, 3] = table_center[0]
bottle_pose[1, 3] = table_center[1] + 0.3 * (table_max[1] - table_min[1])
bottle_pose[2, 3] = table_max[2] + 0.01
set_pose(bottle, bottle_pose)
print('Loading HERB.')
robot = urdf_loader.parseSkeleton(
'package://herb_description/robots/herb.urdf', package_retriever)
if not robot:
raise ValueError('Failed loading HERB model.')
robot_in_table = numpy.array([
[0., 1., 0., 0.000],
[0., 0., 1., 0.000],
[1., 0., 0., -1.025],
[0., 0., 0., 1.000]])
robot_pose = numpy.dot(table_pose, robot_in_table)
robot_pose[2, 3] = 0
set_pose(robot, robot_pose)
# TODO: The IncludeBoth_t() type-tag is awkward (#11).
# TODO: It's counter-intuitive that this includes j1, the parent of wam1.
left_arm = dartpy.dynamics.Chain.create(
robot.getBodyNode('/left/wam1'), robot.getBodyNode('/left/wam7'),
dartpy.dynamics.Chain.IncludeBoth_t(), 'left_arm')
right_arm = dartpy.dynamics.Chain.create(
robot.getBodyNode('/right/wam1'), robot.getBodyNode('/right/wam7'),
dartpy.dynamics.Chain.IncludeBoth_t(), 'right_arm')
head = dartpy.dynamics.Chain.create(
robot.getBodyNode('/head/wam1'), robot.getBodyNode('/head/wam2'),
dartpy.dynamics.Chain.IncludeBoth_t(), 'head')
# Move HERB to the home configuration.
left_arm.setPositions(LEFT_RELAXED_POSITIONS)
right_arm.setPositions(RIGHT_RELAXED_POSITIONS)
# Find an IK solution.
right_ee = right_arm.getBodyNode(right_arm.getNumBodyNodes() - 1)
right_ee_ik = right_ee.getOrCreateIK()
# TODO: I should call setDofs(right_arm.getDofs()), but I can't because
# support for std::vector is not finished.
# Find an IK solution.
right_ee_ik.getTarget().setRelativeTransform(numpy.array([
[ 1.091e-01, 3.633e-06, 9.940e-01, 1.771e+00],
[ 9.940e-01, 3.287e-06, -1.091e-01, 2.253e+00],
[ -3.664e-06, 1.000e+00, -3.253e-06, 8.655e-01],
[ 0.000e+00, 0.000e+00, 0.000e+00, 1.000e+00]
]))
if not right_ee_ik.solve(False):
raise ValueError('Failed to find IK solution.')
right_ee_positions = right_ee_ik.getPositions()
# TODO: Plan to right_ee_positions, instead of teleporting to them.
right_arm.setPositions(right_ee_positions)
# TODO: Close the hand.
# Grab the bottle
attach(bottle, right_ee)
"""
import aikido_rviz
aikido_rviz.init_node('dartpy_grasp_fuze_bottle')
viewer = aikido_rviz.InteractiveMarkerViewer('aikido_markers')
viewer.addSkeleton(robot)
viewer.addSkeleton(bottle)
viewer.addSkeleton(table)
viewer.update()
"""
| bsd-2-clause | 4,882,178,688,475,679,000 | 35.530612 | 79 | 0.658659 | false |
vikkyrk/incubator-beam | sdks/python/apache_beam/examples/complete/game/hourly_team_score.py | 1 | 12102 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Second in a series of four pipelines that tell a story in a 'gaming' domain.
In addition to the concepts introduced in `user_score`, new concepts include:
windowing and element timestamps; use of `Filter`.
This pipeline processes data collected from gaming events in batch, building on
`user_score` but using fixed windows. It calculates the sum of scores per team,
for each window, optionally allowing specification of two timestamps before and
after which data is filtered out. This allows a model where late data collected
after the intended analysis window can be included, and any late-arriving data
prior to the beginning of the analysis window can be removed as well. By using
windowing and adding element timestamps, we can do finer-grained analysis than
with the `user_score` pipeline. However, our batch processing is high-latency,
in that we don't get results from plays at the beginning of the batch's time
period until the batch is processed.
To execute this pipeline using the static example input data, specify the
`--dataset=YOUR-DATASET` flag along with other runner specific flags. (Note:
BigQuery dataset you specify must already exist.)
Optionally include the `--input` argument to specify a batch input file. To
indicate a time after which the data should be filtered out, include the
`--stop_min` arg. E.g., `--stop_min=2015-10-18-23-59` indicates that any data
timestamped after 23:59 PST on 2015-10-18 should not be included in the
analysis. To indicate a time before which data should be filtered out, include
the `--start_min` arg. If you're using the default input
"gs://dataflow-samples/game/gaming_data*.csv", then
`--start_min=2015-11-16-16-10 --stop_min=2015-11-17-16-10` are good values.
"""
from __future__ import absolute_import
import argparse
import datetime
import logging
import apache_beam as beam
from apache_beam import typehints
from apache_beam.io import ReadFromText
from apache_beam.metrics import Metrics
from apache_beam.transforms.window import FixedWindows
from apache_beam.transforms.window import TimestampedValue
from apache_beam.typehints import with_input_types
from apache_beam.typehints import with_output_types
from apache_beam.utils.pipeline_options import GoogleCloudOptions
from apache_beam.utils.pipeline_options import PipelineOptions
from apache_beam.utils.pipeline_options import SetupOptions
class ParseEventFn(beam.DoFn):
"""Parses the raw game event info into GameActionInfo tuples.
Each event line has the following format:
username,teamname,score,timestamp_in_ms,readable_time
e.g.:
user2_AsparagusPig,AsparagusPig,10,1445230923951,2015-11-02 09:09:28.224
The human-readable time string is not used here.
"""
def __init__(self):
super(ParseEventFn, self).__init__()
self.num_parse_errors = Metrics.counter(self.__class__, 'num_parse_errors')
def process(self, element):
components = element.split(',')
try:
user = components[0].strip()
team = components[1].strip()
score = int(components[2].strip())
timestamp = int(components[3].strip())
yield {'user': user, 'team': team, 'score': score, 'timestamp': timestamp}
except: # pylint: disable=bare-except
# Log and count parse errors.
self.num_parse_errors.inc()
logging.info('Parse error on %s.', element)
@with_input_types(ints=typehints.Iterable[int])
@with_output_types(int)
def sum_ints(ints):
return sum(ints)
class ExtractAndSumScore(beam.PTransform):
"""A transform to extract key/score information and sum the scores.
The constructor argument `field` determines whether 'team' or 'user' info is
extracted.
"""
def __init__(self, field):
super(ExtractAndSumScore, self).__init__()
self.field = field
def expand(self, pcoll):
return (pcoll
| beam.Map(lambda info: (info[self.field], info['score']))
| beam.CombinePerKey(sum_ints))
def configure_bigquery_write():
def window_start_format(element, window):
dt = datetime.datetime.fromtimestamp(int(window.start))
return dt.strftime('%Y-%m-%d %H:%M:%S')
return [
('team', 'STRING', lambda e, w: e[0]),
('total_score', 'INTEGER', lambda e, w: e[1]),
('window_start', 'STRING', window_start_format),
]
class WriteWindowedToBigQuery(beam.PTransform):
"""Generate, format, and write BigQuery table row information.
This class may be used for writes that require access to the window
information.
"""
def __init__(self, table_name, dataset, field_info):
"""Initializes the transform.
Args:
table_name: Name of the BigQuery table to use.
dataset: Name of the dataset to use.
field_info: List of tuples that holds information about output table field
definitions. The tuples are in the
(field_name, field_type, field_fn) format, where field_name is
the name of the field, field_type is the BigQuery type of the
field and field_fn is a lambda function to generate the field
value from the element.
"""
super(WriteWindowedToBigQuery, self).__init__()
self.table_name = table_name
self.dataset = dataset
self.field_info = field_info
def get_schema(self):
"""Build the output table schema."""
return ', '.join(
'%s:%s' % (entry[0], entry[1]) for entry in self.field_info)
def get_table(self, pipeline):
"""Utility to construct an output table reference."""
project = pipeline.options.view_as(GoogleCloudOptions).project
return '%s:%s.%s' % (project, self.dataset, self.table_name)
class BuildRowFn(beam.DoFn):
"""Convert each key/score pair into a BigQuery TableRow as specified."""
def __init__(self, field_info):
super(WriteWindowedToBigQuery.BuildRowFn, self).__init__()
self.field_info = field_info
def process(self, element, window=beam.DoFn.WindowParam):
row = {}
for entry in self.field_info:
row[entry[0]] = entry[2](element, window)
yield row
def expand(self, pcoll):
table = self.get_table(pcoll.pipeline)
return (
pcoll
| 'ConvertToRow' >> beam.ParDo(
WriteWindowedToBigQuery.BuildRowFn(self.field_info))
| beam.io.Write(beam.io.BigQuerySink(
table,
schema=self.get_schema(),
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND)))
def string_to_timestamp(datetime_str):
dt = datetime.datetime.strptime(datetime_str, '%Y-%m-%d-%H-%M')
epoch = datetime.datetime.utcfromtimestamp(0)
return (dt - epoch).total_seconds() * 1000.0
class HourlyTeamScore(beam.PTransform):
def __init__(self, start_min, stop_min, window_duration):
super(HourlyTeamScore, self).__init__()
self.start_min = start_min
self.stop_min = stop_min
self.window_duration = window_duration
def expand(self, pcoll):
start_min_filter = string_to_timestamp(self.start_min)
end_min_filter = string_to_timestamp(self.stop_min)
return (
pcoll
| 'ParseGameEvent' >> beam.ParDo(ParseEventFn())
# Filter out data before and after the given times so that it is not
# included in the calculations. As we collect data in batches (say, by
# day), the batch for the day that we want to analyze could potentially
# include some late-arriving data from the previous day. If so, we want
# to weed it out. Similarly, if we include data from the following day
# (to scoop up late-arriving events from the day we're analyzing), we
# need to weed out events that fall after the time period we want to
# analyze.
| 'FilterStartTime' >> beam.Filter(
lambda element: element['timestamp'] > start_min_filter)
| 'FilterEndTime' >> beam.Filter(
lambda element: element['timestamp'] < end_min_filter)
# Add an element timestamp based on the event log, and apply fixed
# windowing.
# Convert element['timestamp'] into seconds as expected by
# TimestampedValue.
| 'AddEventTimestamps' >> beam.Map(
lambda element: TimestampedValue(
element, element['timestamp'] / 1000.0))
# Convert window_duration into seconds as expected by FixedWindows.
| 'FixedWindowsTeam' >> beam.WindowInto(FixedWindows(
size=self.window_duration * 60))
# Extract and sum teamname/score pairs from the event data.
| 'ExtractTeamScore' >> ExtractAndSumScore('team'))
def run(argv=None):
"""Main entry point; defines and runs the hourly_team_score pipeline."""
parser = argparse.ArgumentParser()
# The default maps to two large Google Cloud Storage files (each ~12GB)
# holding two subsequent day's worth (roughly) of data.
parser.add_argument('--input',
dest='input',
default='gs://dataflow-samples/game/gaming_data*.csv',
help='Path to the data file(s) containing game data.')
parser.add_argument('--dataset',
dest='dataset',
required=True,
help='BigQuery Dataset to write tables to. '
'Must already exist.')
parser.add_argument('--table_name',
dest='table_name',
default='hourly_team_score',
help='The BigQuery table name. Should not already exist.')
parser.add_argument('--window_duration',
type=int,
default=60,
help='Numeric value of fixed window duration, in minutes')
parser.add_argument('--start_min',
dest='start_min',
default='1970-01-01-00-00',
help='String representation of the first minute after '
'which to generate results in the format: '
'yyyy-MM-dd-HH-mm. Any input data timestamped '
'prior to that minute won\'t be included in the '
'sums.')
parser.add_argument('--stop_min',
dest='stop_min',
default='2100-01-01-00-00',
help='String representation of the first minute for '
'which to generate results in the format: '
'yyyy-MM-dd-HH-mm. Any input data timestamped '
                           'after that minute won\'t be included in the '
'sums.')
known_args, pipeline_args = parser.parse_known_args(argv)
pipeline_options = PipelineOptions(pipeline_args)
p = beam.Pipeline(options=pipeline_options)
pipeline_options.view_as(SetupOptions).save_main_session = True
(p # pylint: disable=expression-not-assigned
| ReadFromText(known_args.input)
| HourlyTeamScore(
known_args.start_min, known_args.stop_min, known_args.window_duration)
| WriteWindowedToBigQuery(
known_args.table_name, known_args.dataset, configure_bigquery_write()))
result = p.run()
result.wait_until_finish()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
| apache-2.0 | 4,256,046,447,590,686,700 | 40.023729 | 80 | 0.665262 | false |
hiisi13/django-south-central | south_central/migration.py | 1 | 1816 | import os
from django.utils import importlib
class Migrations(list):
def __init__(self, app_name):
self._app_name = app_name
migrations_dir = self._ensure_migrations_dir(app_name)
self._load_migrations(migrations_dir)
def migrations_module(self):
return self._app_name + '.appmigrations'
def migrations_dir(self):
module_path = self.migrations_module()
try:
module = importlib.import_module(module_path)
except ImportError:
try:
parent = importlib.import_module(".".join(module_path.split(".")[:-1]))
except ImportError:
raise
else:
return os.path.join(os.path.dirname(parent.__file__), module_path.split(".")[-1])
else:
return os.path.dirname(module.__file__)
def _ensure_migrations_dir(self, app_name):
migrations_dir = self.migrations_dir()
if not os.path.isdir(migrations_dir):
os.mkdir(migrations_dir)
init_path = os.path.join(migrations_dir, "__init__.py")
if not os.path.isfile(init_path):
open(init_path, "w").close()
return migrations_dir
def _load_migrations(self, migrations_dir):
filenames = []
for root, dirs, filenames in os.walk(migrations_dir):
self.extend([f.split('.')[0] for f in filenames
if f.endswith('.py') and not f.startswith('__init__')])
def next_filename(self, name):
highest_number = 0
for filename in self:
try:
number = int(filename.split("_")[0])
highest_number = max(highest_number, number)
except ValueError:
pass
return "%04i_%s.py" % (highest_number + 1, name)
| bsd-3-clause | 2,838,243,364,388,195,000 | 32.018182 | 97 | 0.560022 | false |
bvalot/panISa | validate/validate.py | 1 | 3556 | #!/usr/bin/python
# -*- coding: utf-8 -*-
##Copyright (c) 2017 Benoit Valot and Panisa Treepong
##[email protected]
##UMR 6249 Chrono-Environnement, Besançon, France
##Licence GPL
"""Validate panISa [search (IS) insertion on a genome] by simulation read"""
import sys
import argparse
import tempfile
import subprocess
from os import listdir
from util import siminsertseq
from util import simread
from util import managefiledir as man
from util.genreport import GenReport
desc = "Validate panISa [search (IS) insertion on a genome] by simulation read"
command = argparse.ArgumentParser(prog='validate.py', \
description=desc, usage='%(prog)s [options] fasta isinfo')
command.add_argument('fasta', nargs="?", type=argparse.FileType("r"),\
help='Input fasta file')
command.add_argument('isinfo', nargs="?", type=argparse.FileType("r"),\
help='Input tabular file that store information of ISs')
command.add_argument('-o', '--output', nargs="?", type=argparse.FileType("w"), \
default=sys.stdout,\
    help='Return comparison report between panISa and simulation, default=stdout')
command.add_argument('-n', '--number_IS', nargs="?", type=int, default=30, \
help='Number of ISs which were randomly added in simulation, default=30')
command.add_argument('-c', '--coverage', nargs="?", type=int, default=60, \
help='Mean coverage for simulation read, default=60')
command.add_argument('-l', '--length', nargs="?", type=int, default=150, \
help='Length of the first and second reads, default=150')
command.add_argument('-s', '--size', nargs="?", type=int, default=15, \
                     help='Maximum size of direct repeat region, default=15')
command.add_argument('-q', '--quality', nargs="?", type=int, default=20, \
                     help='Min alignment quality value to conserve a clip read, default=20')
command.add_argument('-t', '--thread', nargs="?", type=int, default=1, \
                     help='Number of threads to perform bwa mapping, default=1')
command.add_argument('-v', '--version', action='version', \
version='%(prog)s 0.1.0')
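# Example invocation (illustrative file names):
#   ./validate.py genome.fasta is_info.txt -o report.txt -n 30 -c 60 -t 4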
if __name__=='__main__':
"""Performed job on execution script"""
args = command.parse_args()
##[Prepare temp files and temp directory for all outputs]
temp_dir = tempfile.mkdtemp()+"/" ##[Prepare directory for simulated read output]
simisinfo_resultfile = tempfile.NamedTemporaryFile()
panisa_resultfile = tempfile.NamedTemporaryFile()
##[Simulate the complete genome sequence with ISs]
simfasta_file = siminsertseq.getSimIS(args.isinfo.name,args.fasta.name, \
temp_dir,simisinfo_resultfile.name,args.number_IS)
    ##[Move original fasta file and simulated complete genome to the same place]
ref_file = temp_dir+args.fasta.name.split("/")[-1]
man.copyfile(args.fasta.name,ref_file)
##[Simulate read from simulated complete genome]
simread.simread(ref_file,simfasta_file,args.length,args.coverage,temp_dir,args.thread)
##[Remove intermediate files of read simulation process]
readfile = simfasta_file.split("/")[-1].split(".fasta")[0]+".bam"
man.rmexceptfile(temp_dir,readfile)
##[Call panISa.py]
cmdIS = "../panISa.py %s -o %s -s %s -q %s"%(temp_dir+readfile,\
panisa_resultfile.name,args.size,args.quality)
subprocess.call(cmdIS, shell=True)
    ##[Create comparison report]
report = GenReport()
report.processReport(simisinfo_resultfile.name,panisa_resultfile.name,args.output)
##[Clear temp files]
simisinfo_resultfile.close()
panisa_resultfile.close()
man.removedir(temp_dir)
| gpl-3.0 | 8,381,343,015,888,777,000 | 41.831325 | 90 | 0.699859 | false |
isaacdarcilla/modules | fsteam/bluetoothRfcomm.py | 1 | 2480 | # coding: utf-8
# Copyright (c) 2015-2016 Free Security Team
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#This scanner targets the Bluetooth RFCOMM transport protocol, probing for open channels
from colored import *
from bluetooth import *
from time import *
print '%s Loading Bluetooth RFCOMM Module%s' % (fg(3), attr(0))
sleep(4)
print "\n\tBluetooth RFCOMM Scanner [\033[1;31mbluetooth/btrfcomm\033[1;m] | * scan for an open\n\t RFCOMM port protocol and prints in screen.\n\n\tUsage: use [module]\n\t[BTADDR] - specify bluetooth address.\n\t[MVALUE] - specify maximum value.\n"
try:
tgtBt = raw_input("%s[CMD-LINE]:[BTADDR]: %s" %(fg(1), attr(0)))
print (fore.LIGHT_BLUE+" BTADDR:["+style.RESET + tgtBt +fore.LIGHT_BLUE+"]"+style.RESET )
maxVal = int(raw_input("%s[CMD-LINE]:[MVALUE]: %s" %(fg(1), attr(0))))
print (fore.LIGHT_BLUE+" BTADDR:["+style.RESET + str(maxVal) +fore.LIGHT_BLUE+"]"+style.RESET )
print "\n[+] Scanning Bluetooth Address \'"+tgtBt+"\'"+"\n"
def rfcommCon(addr, port):
sock = BluetoothSocket(RFCOMM)
try:
sock.connect((addr, port))
print '[+] RFCOMM Port ' + str(port) + ' open'
sock.close()
except Exception, e:
print '[-] RFCOMM Port ' + str(port) + ' closed'
for port in range(1, maxVal):
rfcommCon(tgtBt, port)
except KeyboardInterrupt:
print (fore.BLUE+"\n\n[+] Interrupted by user. Terminating.\n"+style.RESET)
except Exception, error: #handle exceptions
#log(error)
print "[-] Error Found: "+ str(error)+"\n"
| gpl-2.0 | -3,407,135,600,620,018,000 | 43.285714 | 248 | 0.720161 | false |
gg7/diamond | src/diamond/collector.py | 1 | 18990 | # coding=utf-8
"""
The Collector class is a base class for all metric collectors.
"""
import os
import socket
import platform
import logging
import configobj
import time
import re
import subprocess
import signal
from diamond.metric import Metric
from diamond.utils.config import load_config
from error import DiamondException
# Detect the architecture of the system and set the counters for MAX_VALUES
# appropriately. Otherwise, rolling over counters will cause incorrect or
# negative values.
if platform.architecture()[0] == '64bit':
MAX_COUNTER = (2 ** 64) - 1
else:
MAX_COUNTER = (2 ** 32) - 1
def get_hostname(config, method=None):
"""
Returns a hostname as configured by the user
"""
method = method or config.get('hostname_method', 'smart')
# case insensitive method
method = method.lower()
if 'hostname' in config and method != 'shell':
return config['hostname']
if method in get_hostname.cached_results:
return get_hostname.cached_results[method]
if method == 'shell':
if 'hostname' not in config:
raise DiamondException(
"hostname must be set to a shell command for"
" hostname_method=shell")
else:
proc = subprocess.Popen(config['hostname'],
shell=True,
stdout=subprocess.PIPE)
hostname = proc.communicate()[0].strip()
if proc.returncode != 0:
raise subprocess.CalledProcessError(proc.returncode,
config['hostname'])
get_hostname.cached_results[method] = hostname
return hostname
if method == 'smart':
hostname = get_hostname(config, 'fqdn_short')
if hostname != 'localhost':
get_hostname.cached_results[method] = hostname
return hostname
hostname = get_hostname(config, 'hostname_short')
get_hostname.cached_results[method] = hostname
return hostname
if method == 'fqdn_short':
hostname = socket.getfqdn().split('.')[0]
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'fqdn':
hostname = socket.getfqdn().replace('.', '_')
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'fqdn_rev':
hostname = socket.getfqdn().split('.')
hostname.reverse()
hostname = '.'.join(hostname)
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'uname_short':
hostname = os.uname()[1].split('.')[0]
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'uname_rev':
hostname = os.uname()[1].split('.')
hostname.reverse()
hostname = '.'.join(hostname)
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'hostname':
hostname = socket.gethostname()
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'hostname_short':
hostname = socket.gethostname().split('.')[0]
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'hostname_rev':
hostname = socket.gethostname().split('.')
hostname.reverse()
hostname = '.'.join(hostname)
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'none':
get_hostname.cached_results[method] = None
return None
raise NotImplementedError(config['hostname_method'])
get_hostname.cached_results = {}
def str_to_bool(value):
"""
    Converts truthy/falsey strings to a bool
Empty strings are false
"""
if isinstance(value, basestring):
value = value.strip().lower()
if value in ['true', 't', 'yes', 'y']:
return True
elif value in ['false', 'f', 'no', 'n', '']:
return False
else:
raise NotImplementedError("Unknown bool %s" % value)
return value
class Collector(object):
"""
The Collector class is a base class for all metric collectors.
"""
def __init__(self, config=None, handlers=[], name=None, configfile=None):
"""
Create a new instance of the Collector class
"""
# Initialize Logger
self.log = logging.getLogger('diamond')
# Initialize Members
if name is None:
self.name = self.__class__.__name__
else:
self.name = name
# Reset signal handlers of forks/threads
signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
self.handlers = handlers
self.last_values = {}
self.configfile = None
self.load_config(configfile, config)
def load_config(self, configfile=None, override_config=None):
"""
Process a configfile, or reload if previously given one.
"""
self.config = configobj.ConfigObj()
# Load in the collector's defaults
if self.get_default_config() is not None:
self.config.merge(self.get_default_config())
if configfile is not None:
self.configfile = os.path.abspath(configfile)
if self.configfile is not None:
config = load_config(self.configfile)
if 'collectors' in config:
if 'default' in config['collectors']:
self.config.merge(config['collectors']['default'])
if self.name in config['collectors']:
self.config.merge(config['collectors'][self.name])
if override_config is not None:
if 'collectors' in override_config:
if 'default' in override_config['collectors']:
self.config.merge(override_config['collectors']['default'])
if self.name in override_config['collectors']:
self.config.merge(override_config['collectors'][self.name])
self.process_config()
def process_config(self):
"""
Intended to put any code that should be run after any config reload
event
"""
if 'byte_unit' in self.config:
if isinstance(self.config['byte_unit'], basestring):
self.config['byte_unit'] = self.config['byte_unit'].split()
if 'enabled' in self.config:
self.config['enabled'] = str_to_bool(self.config['enabled'])
if 'measure_collector_time' in self.config:
self.config['measure_collector_time'] = str_to_bool(
self.config['measure_collector_time'])
# Raise an error if both whitelist and blacklist are specified
if ((self.config.get('metrics_whitelist', None) and
self.config.get('metrics_blacklist', None))):
raise DiamondException(
'Both metrics_whitelist and metrics_blacklist specified ' +
'in file %s' % self.configfile)
if self.config.get('metrics_whitelist', None):
self.config['metrics_whitelist'] = re.compile(
self.config['metrics_whitelist'])
elif self.config.get('metrics_blacklist', None):
self.config['metrics_blacklist'] = re.compile(
self.config['metrics_blacklist'])
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this collector
"""
return {
'enabled': 'Enable collecting these metrics',
'byte_unit': 'Default numeric output(s)',
'measure_collector_time': 'Collect the collector run time in ms',
'metrics_whitelist': 'Regex to match metrics to transmit. ' +
'Mutually exclusive with metrics_blacklist',
'metrics_blacklist': 'Regex to match metrics to block. ' +
'Mutually exclusive with metrics_whitelist',
}
def get_default_config(self):
"""
Return the default config for the collector
"""
return {
            # Default options for all Collectors
# Uncomment and set to hardcode a hostname for the collector path
            # Keep in mind, periods are separators in graphite
# 'hostname': 'my_custom_hostname',
            # If you prefer to just use a different way of calculating the
# hostname
# Uncomment and set this to one of these values:
# fqdn_short = Default. Similar to hostname -s
# fqdn = hostname output
# fqdn_rev = hostname in reverse (com.example.www)
# uname_short = Similar to uname -n, but only the first part
            # uname_rev = uname -n in reverse (com.example.www)
# 'hostname_method': 'fqdn_short',
# All collectors are disabled by default
'enabled': False,
# Path Prefix
'path_prefix': 'servers',
# Path Prefix for Virtual Machine metrics
'instance_prefix': 'instances',
# Path Suffix
'path_suffix': '',
# Default Poll Interval (seconds)
'interval': 300,
# Default Event TTL (interval multiplier)
'ttl_multiplier': 2,
# Default numeric output
'byte_unit': 'byte',
# Collect the collector run time in ms
'measure_collector_time': False,
# Whitelist of metrics to let through
'metrics_whitelist': None,
# Blacklist of metrics to let through
'metrics_blacklist': None,
}
def get_metric_path(self, name, instance=None):
"""
Get metric path.
Instance indicates that this is a metric for a
virtual machine and should have a different
root prefix.
"""
if 'path' in self.config:
path = self.config['path']
else:
path = self.__class__.__name__
if instance is not None:
if 'instance_prefix' in self.config:
prefix = self.config['instance_prefix']
else:
prefix = 'instances'
if path == '.':
return '.'.join([prefix, instance, name])
else:
return '.'.join([prefix, instance, path, name])
if 'path_prefix' in self.config:
prefix = self.config['path_prefix']
else:
prefix = 'systems'
if 'path_suffix' in self.config:
suffix = self.config['path_suffix']
else:
suffix = None
hostname = get_hostname(self.config)
if hostname is not None:
if prefix:
prefix = ".".join((prefix, hostname))
else:
prefix = hostname
# if there is a suffix, add after the hostname
if suffix:
prefix = '.'.join((prefix, suffix))
is_path_invalid = path == '.' or not path
if is_path_invalid and prefix:
return '.'.join([prefix, name])
elif prefix:
return '.'.join([prefix, path, name])
elif is_path_invalid:
return name
else:
return '.'.join([path, name])
def get_hostname(self):
return get_hostname(self.config)
def collect(self):
"""
Default collector method
"""
raise NotImplementedError()
def publish(self, name, value, raw_value=None, precision=0,
metric_type='GAUGE', instance=None):
"""
Publish a metric with the given name
"""
# Check whitelist/blacklist
if self.config['metrics_whitelist']:
if not self.config['metrics_whitelist'].match(name):
return
elif self.config['metrics_blacklist']:
if self.config['metrics_blacklist'].match(name):
return
# Get metric Path
path = self.get_metric_path(name, instance=instance)
# Get metric TTL
ttl = float(self.config['interval']) * float(
self.config['ttl_multiplier'])
# Create Metric
try:
metric = Metric(path, value, raw_value=raw_value, timestamp=None,
precision=precision, host=self.get_hostname(),
metric_type=metric_type, ttl=ttl)
except DiamondException:
self.log.error(('Error when creating new Metric: path=%r, '
'value=%r'), path, value)
raise
# Publish Metric
self.publish_metric(metric)
def publish_metric(self, metric):
"""
Publish a Metric object
"""
# Process Metric
for handler in self.handlers:
handler._process(metric)
def publish_gauge(self, name, value, precision=0, instance=None):
return self.publish(name, value, precision=precision,
metric_type='GAUGE', instance=instance)
def publish_counter(self, name, value, precision=0, max_value=0,
time_delta=True, interval=None, allow_negative=False,
instance=None):
raw_value = value
value = self.derivative(name, value, max_value=max_value,
time_delta=time_delta, interval=interval,
allow_negative=allow_negative,
instance=instance)
return self.publish(name, value, raw_value=raw_value,
precision=precision, metric_type='COUNTER',
instance=instance)
def derivative(self, name, new, max_value=0,
time_delta=True, interval=None,
allow_negative=False, instance=None):
"""
Calculate the derivative of the metric.
"""
# Format Metric Path
path = self.get_metric_path(name, instance=instance)
if path in self.last_values:
old = self.last_values[path]
# Check for rollover
if new < old:
old = old - max_value
# Get Change in X (value)
derivative_x = new - old
            # If we pass in an interval, use it rather than the configured one
if interval is None:
interval = float(self.config['interval'])
# Get Change in Y (time)
if time_delta:
derivative_y = interval
else:
derivative_y = 1
result = float(derivative_x) / float(derivative_y)
if result < 0 and not allow_negative:
result = 0
else:
result = 0
# Store Old Value
self.last_values[path] = new
# Return result
return result
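    # Worked example (added for illustration; the numbers are made up): with
    # the default interval of 300s and time_delta=True, a counter that read
    # 1000 on the previous run and reads 1500 now yields
    # (1500 - 1000) / 300 = ~1.67 per second. The first call for a given path
    # returns 0 because no previous value has been stored yet.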
def _run(self):
"""
Run the collector unless it's already running
"""
try:
start_time = time.time()
# Collect Data
self.collect()
end_time = time.time()
collector_time = int((end_time - start_time) * 1000)
self.log.debug('Collection took %s ms', collector_time)
if 'measure_collector_time' in self.config:
if self.config['measure_collector_time']:
metric_name = 'collector_time_ms'
metric_value = collector_time
self.publish(metric_name, metric_value)
finally:
# After collector run, invoke a flush
# method on each handler.
for handler in self.handlers:
handler._flush()
def find_binary(self, binary):
"""
Scan and return the first path to a binary that we can find
"""
if os.path.exists(binary):
return binary
# Extract out the filename if we were given a full path
binary_name = os.path.basename(binary)
# Gather $PATH
search_paths = os.environ['PATH'].split(':')
# Extra paths to scan...
default_paths = [
'/usr/bin',
            '/bin',
'/usr/local/bin',
'/usr/sbin',
            '/sbin',
'/usr/local/sbin',
]
for path in default_paths:
if path not in search_paths:
search_paths.append(path)
for path in search_paths:
if os.path.isdir(path):
filename = os.path.join(path, binary_name)
if os.path.exists(filename):
return filename
return binary
class ProcessCollector(Collector):
"""
Collector with helpers for handling running commands with/without sudo
"""
def get_default_config_help(self):
config_help = super(ProcessCollector, self).get_default_config_help()
config_help.update({
'use_sudo': 'Use sudo?',
'sudo_cmd': 'Path to sudo',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(ProcessCollector, self).get_default_config()
config.update({
'use_sudo': False,
'sudo_cmd': self.find_binary('/usr/bin/sudo'),
})
return config
def run_command(self, args):
if 'bin' not in self.config:
raise Exception('config does not have any binary configured')
if not os.access(self.config['bin'], os.X_OK):
raise Exception('%s is not executable' % self.config['bin'])
try:
command = args
command.insert(0, self.config['bin'])
if str_to_bool(self.config['use_sudo']):
command.insert(0, self.config['sudo_cmd'])
return subprocess.Popen(command,
stdout=subprocess.PIPE).communicate()
except OSError:
self.log.exception("Unable to run %s", command)
return None
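    # Usage sketch (added for illustration; the binary and flags are
    # hypothetical): a subclass would set self.config['bin'] in its
    # get_default_config(), e.g. to '/usr/bin/vgs', and then call
    #     out, err = self.run_command(['--noheadings', '--units', 'b'])
    # run_command returns the (stdout, stderr) tuple from communicate(), or
    # None if the command could not be started.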
| mit | -7,991,858,011,176,806,000 | 31.96875 | 79 | 0.551132 | false |
RHInception/re-worker-output | replugin/outputworker/__init__.py | 1 | 4228 | # -*- coding: utf-8 -*-
# Copyright © 2014 SEE AUTHORS FILE
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Output worker.
"""
import os
import re
from reworker.worker import Worker
from jinja2 import escape
class OutputWorkerError(Exception):
"""
Base exception class for OutputWorker errors.
"""
pass
class OutputWorker(Worker):
"""
Worker which collects output messages and writes them out.
"""
def __init__(self, *args, **kwargs):
Worker.__init__(self, *args, **kwargs)
# There are no redactions by default
self._redaction_rx = None
# Attempt to pull redactions from the worker config
redaction_cfg = self._config.get('redactions', [])
if redaction_cfg:
# if redactions exist in the config then build a regex from
# the config to use for substitution.
redaction_rx_build = '('
for redaction in redaction_cfg:
redaction_str = '.*%s[^\\n]*\\n|' % redaction
self.app_logger.debug('Adding "%s"' % redaction)
redaction_rx_build += redaction_str
# Remove the last | and end the regex
redaction_rx_build = redaction_rx_build[0:-1] + ')'
self._redaction_rx = re.compile(redaction_rx_build)
self.app_logger.info('Redactions are turned on.')
self.app_logger.debug('Redaction RX: %s' % redaction_rx_build)
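        # Example (added for illustration; the config value is hypothetical):
        # with 'redactions': ['password'] in the worker config, the regex
        # built above is '(.*password[^\n]*\n)', so any output line containing
        # "password" is later replaced with '[redacted]\n' before being
        # written to the log file.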
def process(self, channel, basic_deliver, properties, body, output):
"""
Writes out output messages from the bus.
.. note::
Since this is an output worker it does not send messages to be
consumed by notification or output workers.
*Keys Requires*:
* message: the message to write out.
"""
# Ack the original message
self.ack(basic_deliver)
corr_id = str(properties.correlation_id)
# TODO: decide if we need 'start/stop' for these kinds of workers
# Notify we are starting
# self.send(
# properties.reply_to, corr_id, {'status': 'started'}, exchange='')
try:
try:
message = str(body['message'])
except KeyError:
raise OutputWorkerError('No message given. Nothing to do!')
# Write out the message
file_path = os.path.sep.join([
self._config['output_dir'], ('%s.log' % corr_id)])
with open(file_path, 'a') as output_file:
if not message.endswith('\n'):
message = message + '\n'
message = message.replace('\\n', "\n")
# escape HTML out
message = escape(message)
if self._redaction_rx:
message, subbed = self._redaction_rx.subn(
'[redacted]\n', message)
if subbed:
self.app_logger.info(
'Redacted a line in corr_id %s' % corr_id)
# If anyone wants to make things pretty with HTML start here
output_file.write(message)
# Send out responses
self.app_logger.info('Wrote output for correlation_id %s' % (
corr_id))
        except OutputWorkerError as fwe:
            # If an OutputWorkerError happens, log it as a failure.
            self.app_logger.error('Failure: %s' % fwe)
def main(): # pragma: no cover
from reworker.worker import runner
runner(OutputWorker)
if __name__ == '__main__': # pragma nocover
main()
| agpl-3.0 | 8,644,221,077,200,686,000 | 35.128205 | 78 | 0.585285 | false |
rc500/ardrone_archive_aarons_laptop | ardrone/native.py | 1 | 1120 | """Module to handle importing native modules via ctypes.
"""
import ctypes
import os
import logging
log = logging.getLogger()
def load_dll(name):
"""Attempt to load a DLL from the lib directory under ardrone/native.
  Appropriate platform suffixes are attempted.
*name* is the filename (without platform extension, e.g. ".dll", ".so") of
the DLL to load.
  Returns a ctypes CDLL instance if successful or None on failure.
"""
  # Find out which directory _this_ file is in
this_dir = os.path.abspath(os.path.dirname(__file__))
# And hence the native libdir
native_dir = os.path.join(this_dir, 'native')
for lib_dir in ['', 'bin', 'lib']:
for suffix in ['', '.so', '.dll']:
dllpath = os.path.join(native_dir, lib_dir, name + suffix)
if os.path.exists(dllpath):
try:
dll = ctypes.CDLL(dllpath)
return dll
except OSError as e:
# Report DLL load errors and try to continue
log.warning('Error loading %s: %s' % (dllpath, str(e)))
log.error('Failed to find or load DLL "%s" in the native directory.' % (name,))
return None
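# Usage sketch (added for illustration; the DLL name and exported function are
# hypothetical):
#   navdata = load_dll('libnavdata')
#   if navdata is not None:
#       navdata.some_exported_function()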
| apache-2.0 | -5,403,136,657,152,677,000 | 27 | 81 | 0.644643 | false |
grzanka/ibodata | ibodata/lateral_profile.py | 1 | 6329 | import math
import numpy as np
from beprof.profile import Profile
class LateralProfile(Profile):
def penumbra_right(self):
"""
In case of good data returns floating-point number
In case of corrupted data returns nan
"""
return self.x_at_y(0.1, True) - self.x_at_y(0.9, True)
def penumbra_left(self):
"""
In case of good data returns floating-point number
In case of corrupted data returns nan
"""
return self.x_at_y(0.9) - self.x_at_y(0.1)
def field_ratio(self, level):
"""
In case of good data returns floating-point number
Level has to be >= 0.0 and <= 1 or exception is raised
In case of corrupted data returns nan
"""
if level < 0.0 or level > 1.0:
raise ValueError("Expected level to be between 0.0 and 1.0")
return self.width(level) / self.width(0.5)
def symmetry(self, level):
"""
In case of good data returns floating-point number
Level has to be >= 0 and <= 1 or exception is raised
In case of corrupted data returns nan
"""
if level < 0 or level > 1.0:
raise ValueError("Expected level to be between 0 and 1")
a = math.fabs(self.x_at_y(level, False))
b = math.fabs(self.x_at_y(level, True))
return (math.fabs(a - b) / (a + b)) * 200.0
def flatness_50(self):
"""
Returns floating-point number
In case of corrupted data returns nan
Returns value between points if max/min value occurs on border
"""
d = (self.penumbra_left() + self.penumbra_right()) / 2
if np.isnan(d):
return np.nan
left = self.x_at_y(0.5) + 2.0 * d
right = self.x_at_y(0.5, True) - 2.0 * d
p_max = np.max(np.append(self.y[np.logical_and(self.x <= right, self.x >= left)],
[self.y_at_x(right), self.y_at_x(left)]))
p_min = np.min(np.append(self.y[np.logical_and(self.x <= right, self.x >= left)],
[self.y_at_x(right), self.y_at_x(left)]))
return ((p_max - p_min) / (p_max + p_min)) * 100.0
def flatness_90(self):
"""
Returns floating-point number
In case of corrupted data returns nan
Returns value between points if max/min value occurs on border
"""
d = (self.penumbra_left() + self.penumbra_right()) / 2
if np.isnan(d):
return np.nan
left = self.x_at_y(0.9) + d
right = self.x_at_y(0.9, True) - d
p_max = np.max(
np.append(self.y[np.logical_and(self.x <= right, self.x >= left)], [self.y_at_x(right), self.y_at_x(left)]))
p_min = np.min(
np.append(self.y[np.logical_and(self.x <= right, self.x >= left)], [self.y_at_x(right), self.y_at_x(left)]))
return ((p_max - p_min) / (p_max + p_min)) * 100.0
def asymmetry(self):
"""
If everything is fine returns float
In case of corrupted data returns nan
        Adds the area between x = 0 and the nearest sampled points on each side if self.x contains no 0 value.
        np.trapz() sometimes returns an array and sometimes a float,
        so its return values are converted to float to avoid type problems.
"""
area_left = float(np.trapz(x=self.x[self.x <= 0], y=self.y[self.x <= 0]))
area_right = float(np.trapz(x=self.x[self.x >= 0], y=self.y[self.x >= 0]))
if np.argwhere(self.x == 0).size == 0:
left_index_arr = np.argwhere(self.x < 0)
area_left += float(np.trapz(x=(self.x[left_index_arr[-1]], .0),
y=(self.y[left_index_arr[-1]], self.y_at_x(0))))
right_index_arr = np.argwhere(self.x > 0)
area_right += float(np.trapz(x=(.0, self.x[right_index_arr[0]]),
y=(self.y_at_x(0), self.y[right_index_arr[0]])))
result = ((area_left - area_right) / (area_left + area_right)) * 100.0
return result
def normalize(self, dt, allow_cast=True):
"""
Doesn't return anything
In case of corrupted data raises ValueError
        Translates y to bring y.min() to 0 (noise subtraction) and then
        normalizes to 1 over the [-dt, +dt] area around the mid of the profile.
        If allow_cast is set to True, division may not be done in place and casting may occur.
        If division in place is not possible and allow_cast is False,
        an exception is raised.
"""
# check if division is possible
# If self.y is float, then nothing happens.
# In case it has integers type will stay the same,
# but casting may occur
try:
self.y /= 1.0
except TypeError:
if not allow_cast:
raise TypeError("Division in place is not possible and casting is not allowed")
self.y -= self.y.min()
a = self.y.max() / 2.0
w = self.width(a)
if np.isnan(w):
raise ValueError("Part of profile is missing.")
mid = self.x_at_y(a) + w / 2.0
# type of mid is float so if self.x is made of integers we can't use subtraction in place
if allow_cast:
self.x = self.x - mid
else:
self.x -= mid
norm_section_y = self.y[np.fabs(self.x) <= dt]
norm_section_x = self.x[np.fabs(self.x) <= dt]
area = np.trapz(x=norm_section_x, y=norm_section_y)
"""
if there's no point on the edge normalization is not perfectly accurate
and we are normalizing over smaller area than [-dt, +dt]
That's why we interpolate points on the edge below.
"""
if np.argwhere(self.x == -dt).size == 0:
coords_y = (self.y_at_x(-dt), norm_section_y[0])
coords_x = (-dt, norm_section_x[0])
area += np.trapz(x=coords_x, y=coords_y)
if np.argwhere(self.x == dt).size == 0:
coords_y = (norm_section_y[-1], self.y_at_x(dt))
coords_x = (norm_section_x[-1], dt)
area += np.trapz(x=coords_x, y=coords_y)
ave = area / (2.0 * dt)
if allow_cast:
self.y = self.y / ave
else:
self.y /= ave
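# Minimal usage sketch (added for illustration; not part of the original
# module). It assumes LateralProfile, like beprof's Profile, can be built from
# a two-column (x, y) array; the flat-topped test shape below is made up.
if __name__ == '__main__':
    xs = np.linspace(-10.0, 10.0, 201)
    ys = np.exp(-(xs / 6.0) ** 8)  # roughly field-like: flat top, steep edges
    profile = LateralProfile(np.column_stack((xs, ys)))
    profile.normalize(dt=2.0)
    print(profile.symmetry(0.5))
    print(profile.flatness_90())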
| gpl-3.0 | -85,395,117,010,977,300 | 37.591463 | 120 | 0.54748 | false |
BasementCat/bubblenet | tests/test_addresses.py | 1 | 4663 | import os
import unittest
from bubblenet.errors import AddressParseError
from bubblenet.addresses import (
Address,
IPAddress,
IPv4Address,
IPv6Address,
UnixSocketAddress,
)
class ParsingTest(unittest.TestCase):
# Test cases taken from https://en.wikipedia.org/wiki/Module:IPAddress/testcases
valid_v4 = [
'200.200.200.200',
'0.0.0.0',
'255.255.255.255',
]
invalid_v4 = [
' 200.200.200.200', # whitespace not currently allowed
'200.200.200.200 ', # whitespace not currently allowed
'200.200.256.200',
'200.200.200.200.',
'200.200.200',
'200.200.200.2d0',
'00.00.00.00', # according to talkpage, leading zeroes unacceptable.
'100.100.020.100', # according to talkpage, leading zeroes unacceptable.
'-1.0.0.0',
'200000000000000000000000000000000000000000000000000000000000000000000000000000.200.200.200',
'00000000000005.10.10.10',
]
valid_v6 = [
('00AB:0002:3008:8CFD:00AB:0002:3008:8CFD', # full length
'00ab:0002:3008:8cfd:00ab:0002:3008:8cfd'),
('00ab:0002:3008:8cfd:00ab:0002:3008:8cfd', # lowercase
'00ab:0002:3008:8cfd:00ab:0002:3008:8cfd'),
('00aB:0002:3008:8cFd:00Ab:0002:3008:8cfD', # mixed case
'00ab:0002:3008:8cfd:00ab:0002:3008:8cfd'),
('AB:02:3008:8CFD:AB:02:3008:8CFD', # abbreviated
'00ab:0002:3008:8cfd:00ab:0002:3008:8cfd'),
('AB:02:3008:8CFD::02:3008:8CFD', # correct use of ::
'00ab:0002:3008:8cfd:0000:0002:3008:8cfd'),
('::', # unassigned IPv6 address
'0000:0000:0000:0000:0000:0000:0000:0000'),
('::1', # loopback IPv6 address
'0000:0000:0000:0000:0000:0000:0000:0001'),
('0::', # another name for unassigned IPv6 address
'0000:0000:0000:0000:0000:0000:0000:0000'),
('0::0', # another name for unassigned IPv6 address
'0000:0000:0000:0000:0000:0000:0000:0000'),
]
invalid_v6 = [
'00AB:00002:3008:8CFD:00AB:0002:3008:8CFD', # at most 4 digits per segment
':0002:3008:8CFD:00AB:0002:3008:8CFD', # can't remove all 0s from first segment unless using ::
'00AB:0002:3008:8CFD:00AB:0002:3008:', # can't remove all 0s from last segment unless using ::
'AB:02:3008:8CFD:AB:02:3008:8CFD:02', # too long
'AB:02:3008:8CFD::02:3008:8CFD:02', # too long
'AB:02:3008:8CFD::02::8CFD', # can't have two ::s
'GB:02:3008:8CFD:AB:02:3008:8CFD', # Invalid character G
'2:::3', # illegal: three colons
]
valid_unix = [
('foo.sock', lambda v: v + '/foo.sock'),
('bar/foo.sock', lambda v: v + '/bar/foo.sock'),
('baz/../bar.sock', lambda v: v + '/bar.sock'),
]
def test_valid_address_v4(self):
for addr in self.valid_v4:
self.assertEquals(addr, str(IPv4Address(addr)), msg=repr(addr))
def test_invalid_address_v4(self):
for addr in self.invalid_v4:
try:
IPv4Address(addr)
except AddressParseError:
continue
self.assertTrue(False, msg=repr(addr))
def test_valid_address_v6(self):
for addr, correct in self.valid_v6:
try:
parsed = str(IPv6Address(addr))
self.assertEquals(correct, parsed, msg=repr(correct) + ' != ' + repr(parsed))
except AddressParseError as e:
self.assertTrue(False, str(e))
def test_invalid_address_v6(self):
for addr in self.invalid_v6:
try:
IPv6Address(addr)
except AddressParseError:
continue
self.assertTrue(False, msg=repr(addr))
def test_valid_address_unix(self):
for addr, correct in self.valid_unix:
try:
parsed = str(UnixSocketAddress(addr))
self.assertEquals(correct(os.getcwd()), parsed, msg=repr(correct(os.getcwd())) + ' != ' + repr(parsed))
except AddressParseError as e:
self.assertTrue(False, str(e))
def test_pick_type(self):
for addr in self.valid_v4:
self.assertIsInstance(Address.from_string(addr), IPv4Address)
for addr, correct in self.valid_v6:
self.assertIsInstance(Address.from_string(addr), IPv6Address)
for addr, correct in self.valid_unix:
self.assertIsInstance(Address.from_string(addr), UnixSocketAddress)
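# Added for convenience (not in the original file): allows running this test
# module directly with `python test_addresses.py`.
if __name__ == '__main__':
    unittest.main()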
| mit | 8,622,519,045,403,237,000 | 38.903509 | 119 | 0.573451 | false |
NicovincX2/Python-3.5 | Statistiques/Estimation (statistique)/Régression/Régression logistique/logistic_path.py | 1 | 1070 | # -*- coding: utf-8 -*-
import os
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
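# l1_min_c returns the smallest C for which the L1-penalised model is not
# all-zero; multiplying by np.logspace(0, 3) sweeps C over three decades
# above that lower bound.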
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
os.system("pause")
| gpl-3.0 | 8,527,585,440,940,499,000 | 20.4 | 79 | 0.626168 | false |
handshakinglemma/wedther | wedther.py | 1 | 2543 | # wedther
# Generates tweets for the current weather in Edmonton, AB.
# Tweets the current temperature and an emoji that represents the
# current weather conditions.
# If there is a current weather alert, it also tweets that.
import tweepy
from secret_wedther import *
from get_weather import get_weather
def main():
# Weather dictionary that assigns an emoji to each condition.
weather_dict = {'A few clouds':'a few \u2601',
'A mix of sun and cloud':'a mix of \u2600 and \u2601',
'Chance of flurries':'chance of \u2744',
'Chance of showers':'chance of \u2614',
'Clear':'clear',
'Cloudy':'\u2601',
'Mainly cloudy':'mainly \u2601',
'Mainly sunny':'mainly \u2600',
'Overcast':'\u26C5',
'Partly cloudy':'partly \u2601',
'Periods of light snow':'periods of light \u2744',
'Periods of snow':'periods of \u2744',
'Rain':'\u2614',
'Snow':'\u2603',
'Sunny':'\u2600',
'Thunderstorm':'\u26C8'}
# Twitter stuff.
auth = tweepy.OAuthHandler(C_KEY, C_SECRET)
auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)
api = tweepy.API(auth)
# Grab the weather information.
weather = get_weather()
temperature = str(weather[0])
picture = str(weather[1])
    weather_picture = weather_dict.get(picture, picture)
# Make the tweet!
tweet = 'The weather right now is ' + temperature + ' and ' + weather_picture
# Get the last tweet from a file.
filename = 'last_tweet.txt'
tweet_file = open(filename, 'r+')
last_tweet = tweet_file.readline().strip()
# Reword the new tweet if it is a duplicate of the last tweet.
if last_tweet == tweet:
tweet = 'The weather remains ' + temperature + ' and ' + weather_picture
elif 'The weather remains' in last_tweet:
if last_tweet[20:23] == tweet[20:23]:
tweet = 'The weather is still ' + temperature + ' and ' + weather_picture
elif 'The weather is still' in last_tweet:
if last_tweet[21:24] == tweet[21:24]:
tweet = 'The weather continues to be ' + temperature + ' and ' + weather_picture
tweet_file.close()
# Write the new tweet to the file.
tweet_file = open(filename, 'w')
tweet_file.write(tweet)
tweet_file.close()
# Tweet the tweet!
api.update_status(tweet)
main()
| mit | -2,016,450,442,182,445,000 | 35.328571 | 92 | 0.578451 | false |