Columns: repo_name (string, lengths 5 to 92), path (string, lengths 4 to 232), copies (string, 19 classes), size (string, lengths 4 to 7), content (string, lengths 721 to 1.04M), license (string, 15 classes), hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B), line_mean (float64, 6.51 to 99.9), line_max (int64, 15 to 997), alpha_frac (float64, 0.25 to 0.97), autogenerated (bool, 1 class).
repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
bmars/sisko | sisko/utils.py | 1 | 2204 | # Copyright (C) 2014 Brian Marshall
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import GLib, Gio
# This mapping of icon names to descriptions was borrowed from Nautilus.
_BASIC_CONTENT_TYPES = {
'application-x-executable': _("Program"),
'audio-x-generic': _("Audio"),
'font-x-generic': _("Font"),
'image-x-generic': _("Image"),
'package-x-generic': _("Archive"),
'text-html': _("Markup"),
'text-x-generic': _("Text"),
'text-x-script': _("Program"),
'video-x-generic': _("Video"),
'x-office-document': _("Document"),
'x-office-presentation': _("Document"),
'x-office-spreadsheet': _("Document")
}
def format_time(time: GLib.TimeVal):
"""
Return a short string representing the time. The format will differ
depending on how close to the current date it is.
"""
dt = GLib.DateTime.new_from_timeval_local(time)
now = GLib.DateTime.new_now_local()
if dt.get_ymd() == now.get_ymd():
return dt.format(_("%-I:%M %P"))
elif dt.get_year() == now.get_year():
return dt.format(_("%b %-d"))
else:
return dt.format(_("%b %-d %Y"))
def get_basic_content_type(content_type: str):
"""
Return a short string describing the content type.
"""
if content_type == 'inode/directory':
return _("Folder")
elif content_type == 'inode/symlink':
return _("Link")
elif Gio.content_type_is_unknown(content_type):
return _("Binary")
else:
return _BASIC_CONTENT_TYPES.get(
Gio.content_type_get_generic_icon_name(content_type), _("Unknown"))
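# A minimal usage sketch (not part of the original module). The module assumes
# the application installs gettext's _() as a builtin before importing it, so a
# hypothetical caller might look like:
#
#   import gettext
#   gettext.install('sisko')   # makes _() available as a builtin
#   from sisko import utils
#
#   utils.get_basic_content_type('inode/directory')           # -> "Folder"
#   utils.get_basic_content_type('text/html')                 # -> "Markup"
#   utils.get_basic_content_type('application/octet-stream')  # -> "Binary"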
| gpl-3.0 | -8,794,476,572,636,623,000 | 35.733333 | 79 | 0.648367 | false |
mtbarta/neural-tagger | src/models/dconv3Experiments/skipconn.py | 1 | 21980 | #!/usr/bin/python
# -*- coding: utf8
import tensorflow as tf
import numpy as np
from src.util.tf import tensorToSeq, seqToTensor, revlut
import math
from google.protobuf import text_format
from tensorflow.python.platform import gfile
import json
import os
from collections import defaultdict
import src.models.tf_utils as tf_utils
from src.models.initializers import identity_initializer, orthogonal_initializer
def _xform(arr, words_vocab, chars_vocab, mxlen, maxw):
"""
transforms a single feature vector into a feed dict for the model.
"""
batch = defaultdict(list)
for i in arr:
xs_ch = np.zeros((mxlen, maxw), dtype=np.int)
xs = np.zeros((mxlen), dtype=np.int)
ys = np.zeros((mxlen), dtype=np.int)
v = i
length = mxlen
for j in range(mxlen):
if j == len(v):
length = j
break
w = v[j]
nch = min(len(w), maxw)
xs[j] = words_vocab.get(w, 0)
for k in range(nch):
xs_ch[j,k] = chars_vocab.get(w[k], 0)
batch['x'].append(xs)
batch['y'].append(ys)
batch['xch'].append(xs_ch)
batch['id'].append(i)
batch['length'].append(length)
return batch
class DConv():
def __init__(self, sess, name, version='1'):
self.sess = sess
self.name = name
self.version = version
def predict(self, batch, xform=True, training_phase=False, word_keep=1.0):
if not isinstance(batch, dict):
batch = _xform(batch, self.word_vocab, self.char_vocab, self.maxlen, self.maxw)
lengths = batch["length"]
feed_dict = {self.x: batch["x"],
self.xch: batch["xch"],
self.pkeep: 1.0,
self.word_keep: 1.0,
self.phase: training_phase}
# We can probably conditionally add the loss here
preds = []
with tf.variable_scope(self.name):
if self.crf is True:
probv, tranv = self.sess.run([self.probs, self.A], feed_dict=feed_dict)
for pij, sl in zip(probv, lengths):
unary = pij[:sl]
viterbi, _ = tf.contrib.crf.viterbi_decode(unary, tranv)
preds.append(viterbi)
else:
# Get batch (B, T)
bestv = self.sess.run(self.best, feed_dict=feed_dict)
# Each sentence, probv
for pij, sl in zip(bestv, lengths):
unary = pij[:sl]
preds.append(unary)
if xform:
# print(preds)
return [[self.y_lut[i] for i in sent] for sent in preds]
else:
return preds
@classmethod
def restore(cls, sess, indir, base, checkpoint_name=None):
"""
this method NEEDS to know the base name used in training for the model.
while i declare a variable scope, I still grab variables by names, so
we see duplication in using the base name to get the variables out. It
would be great to fix this at some point to be cleaner.
"""
klass = cls(sess, base)
basename = indir + '/' + base
checkpoint_name = checkpoint_name or basename
with open(basename + '.saver') as fsv:
saver_def = tf.train.SaverDef()
text_format.Merge(fsv.read(), saver_def)
print('Loaded saver def')
with gfile.FastGFile(basename + '.graph', 'r') as f:
gd = tf.GraphDef()
gd.ParseFromString(f.read())
sess.graph.as_default()
tf.import_graph_def(gd, name='')
print('Imported graph def')
with tf.variable_scope(base):
sess.run(saver_def.restore_op_name,
{saver_def.filename_tensor_name: checkpoint_name})
klass.x = tf.get_default_graph().get_tensor_by_name(base + '/'+ 'x:0')
klass.xch = tf.get_default_graph().get_tensor_by_name(base + '/'+ 'xch:0')
klass.y = tf.get_default_graph().get_tensor_by_name(base + '/'+ 'y:0')
klass.pkeep = tf.get_default_graph().get_tensor_by_name(base + '/'+ 'pkeep:0')
klass.word_keep = tf.get_default_graph().get_tensor_by_name(base + '/'+ 'word_keep:0')
klass.phase = tf.get_default_graph().get_tensor_by_name(base + '/'+ 'phase:0')
klass.best = tf.get_default_graph().get_tensor_by_name('output/ArgMax:0') # X
klass.probs = tf.get_default_graph().get_tensor_by_name('output/transpose:0') # X
try:
klass.A = tf.get_default_graph().get_tensor_by_name(base + '/'+ 'Loss/block/transitions:0')
print('Found transition matrix in graph, setting crf=True')
klass.crf = True
except:
print('Failed to get transition matrix, setting crf=False')
klass.A = None
klass.crf = False
with open(basename + '.labels', 'r') as f:
klass.labels = json.load(f)
klass.word_vocab = {}
if os.path.exists(basename + '-word.vocab'):
with open(basename + '-word.vocab', 'r') as f:
klass.word_vocab = json.load(f)
with open(basename + '-char.vocab', 'r') as f:
klass.char_vocab = json.load(f)
with open(basename + '-params', 'r') as f:
params = json.load(f)
klass.maxlen = params['maxlen']
klass.maxw = params['maxw']
# self.name = params['model_name']
klass.saver = tf.train.Saver(saver_def=saver_def)
klass.y_lut = revlut(klass.labels)
return klass
def ex2dict(self, batch, pkeep, phase, word_keep):
return {
self.x: batch["x"],
self.xch: batch["xch"],
self.y: batch["y"],
self.pkeep: pkeep,
self.word_keep: word_keep,
self.phase: phase
}
def createLoss(self):
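        # Roughly, the objective assembled below is
        #   mean_b[ task_loss(block_b) ] + drop_penalty * sum_b ||s_b - s_b_nodrop||^2 / 2
        # where task_loss is the CRF negative log-likelihood when self.crf is
        # True and a masked word-level cross-entropy otherwise, and s_b /
        # s_b_nodrop are a block's scores computed with and without dropout
        # (a dropout-consistency penalty on the intermediate predictions).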
with tf.name_scope("Loss"):
loss = tf.constant(0.0)
gold = tf.cast(self.y, tf.float32)
mask = tf.sign(gold)
lengths = tf.reduce_sum(mask, name="lengths",
reduction_indices=1)
all_total = tf.reduce_sum(lengths, name="total")
#block_scores = tf.unstack(self.intermediate_probs, axis=-1)
block_scores = self.intermediate_probs
print("block_sore length", len(block_scores))
block_no_dropout_scores, _ = self.forward(1.0, 1.0, 1.0, reuse=True)
print("block_score_no_dropout length", len(block_no_dropout_scores))
print("block_score length after anothe fwd", len(block_scores))
all_loss = []
for block, block_no_drop in zip(block_scores, block_no_dropout_scores):
print(block.get_shape())
# reuse = i != 0
# with tf.variable_scope('block', reuse=reuse):
if self.crf is True:
print('crf=True, creating SLL')
viterbi_loss = self._computeSentenceLevelLoss(self.y, mask, lengths, None, block)
all_loss.append(viterbi_loss)
else:
print('crf=False, creating WLL')
all_loss.append(self._computeWordLevelLoss(gold, mask, None, block))
l2_loss = tf.nn.l2_loss(tf.subtract(block, block_no_drop))
loss += self.drop_penalty * l2_loss
loss += tf.reduce_mean(all_loss)
return loss
def _computeSentenceLevelLoss(self, gold, mask, lengths, model, probs):
#zero_elements = tf.equal(lengths, tf.zeros_like(lengths))
#count_zeros_per_row = tf.reduce_sum(tf.to_int32(zero_elements), axis=1)
#flat_sequence_lengths = tf.add(tf.reduce_sum(lengths, 1),
# tf.scalar_mul(2, count_zeros_per_row))
print(probs.get_shape())
print(lengths.get_shape())
print(gold.get_shape())
ll, A = tf.contrib.crf.crf_log_likelihood(probs, gold, lengths, transition_params=self.A)
# print(model.probs)
#all_total = tf.reduce_sum(lengths, name="total")
return tf.reduce_mean(-ll)
def _computeWordLevelLoss(self, gold, mask, model, probs):
nc = len(self.labels)
# Cross entropy loss
cross_entropy = tf.one_hot(self.y, nc, axis=-1) * tf.log(
tf.clip_by_value(tf.nn.softmax(probs), 1e-10, 5.0))
cross_entropy = -tf.reduce_sum(cross_entropy, reduction_indices=2)
cross_entropy *= mask
cross_entropy = tf.reduce_sum(cross_entropy, reduction_indices=1)
all_loss = tf.reduce_mean(cross_entropy, name="loss")
return all_loss
def block(self, wembed, kernel_sz, num_filt, num_layers, reuse=False):
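        # One iterated dilated-convolution block: num_layers convolutions of
        # width kernel_sz whose dilation rate doubles each layer (1, 2, 4, ...)
        # except for the last layer, which drops back to rate 1; each layer is
        # followed by a ReLU and a residual add. With the defaults
        # (kernel_size=3, num_layers=4) one block spans
        # 1 + 2*(1 + 2 + 4 + 1) = 17 tokens of context.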
dilation_rate = 2
initialization = 'identity'
nonlinearity = 'relu'
input_tensor = wembed
with tf.variable_scope('iterated-block', reuse=reuse):
for i in range(0, num_layers):
if i == num_layers-1:
dilation_rate = 1
filter_shape = [1, kernel_sz, num_filt, num_filt]
w = tf_utils.initialize_weights(filter_shape, 'conv-'+ str(i) + "_w", init_type=initialization, gain=nonlinearity, divisor=self.num_classes)
b = tf.get_variable('conv-'+ str(i) + "_b", initializer=tf.constant(0.0 if initialization == "identity" or initialization == "varscale" else 0.001, shape=[num_filt]))
conv = tf.nn.atrous_conv2d(input_tensor,
w,
rate=dilation_rate**i,
padding="SAME",
name='conv-'+ str(i))
conv_b = tf.nn.bias_add(conv, b)
                # keep the activation in its own variable so the `nonlinearity`
                # init-gain string above is not clobbered on later iterations
                activated = tf_utils.apply_nonlinearity(conv_b, "relu")
                input_tensor = activated + input_tensor
tf.summary.histogram('conv-'+str(i), input_tensor)
# input_tensor = tf.nn.relu(input_tensor, name="relu-"+str(i))
return input_tensor
def params(self, labels, word_vec, char_vec, mxlen,
maxw, rnntype, wsz, hsz, filtsz, num_filt=64,
kernel_size=3, num_layers=4, num_iterations=3,
crf=False):
self.num_iterations = num_iterations
self.num_layers = num_layers
self.kernel_size = kernel_size
self.num_filt = num_filt
self.crf = crf
char_dsz = char_vec.dsz
nc = len(labels)
self.num_classes=nc
self.x = tf.placeholder(tf.int32, [None, mxlen], name="x")
self.xch = tf.placeholder(tf.int32, [None, mxlen, maxw], name="xch")
self.y = tf.placeholder(tf.int32, [None, mxlen], name="y")
self.intermediate_probs = tf.placeholder(tf.int32, [None, mxlen, nc, num_iterations+2], name="y")
self.pkeep = tf.placeholder(tf.float32, name="pkeep")
self.word_keep = tf.placeholder(tf.float32, name="word_keep")
self.labels = labels
self.y_lut = revlut(labels)
self.phase = tf.placeholder(tf.bool, name="phase")
self.l2_loss = tf.constant(0.0)
self.word_vocab = {}
if word_vec is not None:
self.word_vocab = word_vec.vocab
self.char_vocab = char_vec.vocab
self.char_dsz = char_dsz
self.wsz = wsz
self.mxlen = mxlen
self.drop_penalty = 0.001
self.A = tf.get_variable("transitions", [self.num_classes, self.num_classes])
# if num_filt != nc:
# raise RuntimeError('number of filters needs to be equal to number of classes!')
self.filtsz = [int(filt) for filt in filtsz.split(',') ]
with tf.variable_scope('output/'):
W = tf.Variable(tf.truncated_normal([self.num_filt, nc],
stddev = 0.1), name="W")
# W = tf.get_variable('W', initializer=tf.contrib.layers.xavier_initializer(), shape=[num_filt, nc])
b = tf.Variable(tf.constant(0.0, shape=[1,nc]), name="b")
intermediates = []
if word_vec is not None:
with tf.name_scope("WordLUT"):
self.Ww = tf.Variable(tf.constant(word_vec.weights, dtype=tf.float32), name = "W")
self.we0 = tf.scatter_update(self.Ww, tf.constant(0, dtype=tf.int32, shape=[1]), tf.zeros(shape=[1, word_vec.dsz]))
with tf.name_scope("CharLUT"):
self.Wc = tf.Variable(tf.constant(char_vec.weights, dtype=tf.float32), name = "W")
self.ce0 = tf.scatter_update(self.Wc, tf.constant(0, dtype=tf.int32, shape=[1]), tf.zeros(shape=[1, self.char_dsz]))
self.input_dropout_keep_prob = self.word_keep
self.middle_dropout_keep_prob = 1.00
self.hidden_dropout_keep_prob = self.pkeep
self.intermediate_probs, self.probs = self.forward(self.hidden_dropout_keep_prob,
self.input_dropout_keep_prob,
self.middle_dropout_keep_prob,
reuse=False)
self.loss = self.createLoss()
def forward(self, hidden_keep, input_keep, middle_keep, reuse=True):
"""
used to determine the actual graph.
returns (intermediate_probs, probs). technically probs is the last layer of
the intermediate probs.
"""
block_unflat_scores = []
with tf.variable_scope("forward", reuse=reuse):
with tf.control_dependencies([self.we0]):
wembed = tf.nn.embedding_lookup(self.Ww, self.x, name="embeddings")
with tf.control_dependencies([self.ce0]):
xch_seq = tensorToSeq(self.xch)
cembed_seq = []
for i, xch_i in enumerate(xch_seq):
cembed_seq.append(shared_char_word(self.Wc, xch_i, self.filtsz, self.char_dsz, self.wsz, None if (i == 0 and not reuse) else True))
word_char = seqToTensor(cembed_seq)
input_feats = tf.concat([wembed, word_char], 2)
input_feats_expanded = tf.expand_dims(input_feats, 1)
input_feats_expanded_drop = tf.nn.dropout(input_feats_expanded, self.input_dropout_keep_prob)
# first projection of embeddings
filter_shape = [1, self.kernel_size, input_feats.get_shape()[2], self.num_filt]
w = tf_utils.initialize_weights(filter_shape, "conv_start" + "_w", init_type='xavier', gain='relu')
b = tf.get_variable("conv_start" + "_b", initializer=tf.constant(0.01, shape=[self.num_filt]))
conv0 = tf.nn.conv2d(input_feats_expanded_drop, w, strides=[1, 1, 1, 1], padding="SAME", name="conv_start")
h0 = tf_utils.apply_nonlinearity(tf.nn.bias_add(conv0, b), 'relu')
initial_inputs = [h0]
last_dims = self.num_filt
self.share_repeats = True
self.projection = False
# Stacked atrous convolutions
last_output = tf.concat(axis=3, values=initial_inputs)
for iteration in range(self.num_iterations):
hidden_outputs = []
total_output_width = self.num_filt
reuse_block = (iteration != 0)
block_name_suff = "" if self.share_repeats else str(block)
inner_last_dims = last_dims
inner_last_output = last_output
with tf.variable_scope("block" + block_name_suff, reuse=reuse_block):
block_output = self.block(inner_last_output, self.kernel_size, self.num_filt, self.num_layers, reuse=reuse_block)
#legacy strubell logic. we only grab the last layer of the block here. always.
h_concat = tf.concat(axis=3, values=[block_output])
last_output = tf.nn.dropout(h_concat, self.middle_dropout_keep_prob)
last_dims = total_output_width
h_concat_squeeze = tf.squeeze(h_concat, [1])
h_concat_flat = tf.reshape(h_concat_squeeze, [-1, total_output_width])
# Add dropout
with tf.name_scope("hidden_dropout"):
h_drop = tf.nn.dropout(h_concat_flat, self.hidden_dropout_keep_prob)
def do_projection():
# Project raw outputs down
with tf.name_scope("projection"):
projection_width = int(total_output_width/(2*len(hidden_outputs)))
w_p = tf_utils.initialize_weights([total_output_width, projection_width], "w_p", init_type="xavier")
b_p = tf.get_variable("b_p", initializer=tf.constant(0.01, shape=[projection_width]))
projected = tf.nn.xw_plus_b(h_drop, w_p, b_p, name="projected")
projected_nonlinearity = tf_utils.apply_nonlinearity(projected, self.nonlinearity)
return projected_nonlinearity, projection_width
# only use projection if we wanted to, and only apply middle dropout here if projection
input_to_pred, proj_width = do_projection() if self.projection else (h_drop, total_output_width)
input_to_pred_drop = tf.nn.dropout(input_to_pred, self.middle_dropout_keep_prob) if self.projection else input_to_pred
# Final (unnormalized) scores and predictions
with tf.name_scope("output"+block_name_suff):
w_o = tf_utils.initialize_weights([proj_width, self.num_classes], "w_o", init_type="xavier")
b_o = tf.get_variable("b_o", initializer=tf.constant(0.01, shape=[self.num_classes]))
self.l2_loss += tf.nn.l2_loss(w_o)
self.l2_loss += tf.nn.l2_loss(b_o)
scores = tf.nn.xw_plus_b(input_to_pred_drop, w_o, b_o, name="scores")
unflat_scores = tf.reshape(scores, tf.stack([-1, self.mxlen, self.num_classes]))
block_unflat_scores.append(unflat_scores)
# probs = unflat_scores
# best = tf.argmax(self.probs, 2)
# intermediate_probs = tf.stack(block_unflat_scores, -1)
return block_unflat_scores, unflat_scores
def log(tensor):
print(tensor)
def highway_conns(inputs, wsz_all, n, reuse):
for i in range(n):
with tf.variable_scope("highway-%d" % i,reuse=reuse):
W_p = tf.get_variable("W_p", [wsz_all, wsz_all])
b_p = tf.get_variable("B_p", [1, wsz_all], initializer=tf.constant_initializer(0.0))
proj = tf.nn.relu(tf.matmul(inputs, W_p) + b_p, "relu-proj")
W_t = tf.get_variable("W_t", [wsz_all, wsz_all])
b_t = tf.get_variable("B_t", [1, wsz_all], initializer=tf.constant_initializer(-2.0))
transform = tf.nn.sigmoid(tf.matmul(inputs, W_t) + b_t, "sigmoid-transform")
inputs = tf.multiply(transform, proj) + tf.multiply(inputs, 1 - transform)
return inputs
def skip_conns(inputs, wsz_all, n, reuse):
for i in range(n):
with tf.variable_scope("skip-%d" % i, reuse=reuse):
W_p = tf.get_variable("W_p", [wsz_all, wsz_all])
b_p = tf.get_variable("B_p", [1, wsz_all], initializer=tf.constant_initializer(0.0))
proj = tf.nn.relu(tf.matmul(inputs, W_p) + b_p, "relu")
inputs = inputs + proj
return inputs
def char_word_conv_embeddings(char_vec, filtsz, char_dsz, wsz, reuse):
"""
    char_vec: character embeddings for the words at one time step,
        shape (batch, padded word length, char_dsz)
    filtsz: list of integer filter widths, e.g. [1, 2, 3]
"""
expanded = tf.expand_dims(char_vec, -1)
mots = []
for i, fsz in enumerate(filtsz):
with tf.variable_scope('cmot-%s' % fsz, reuse=reuse):
kernel_shape = [fsz, char_dsz, 1, wsz]
# Weight tying
W = tf.get_variable("W", kernel_shape)
b = tf.get_variable("b", [wsz], initializer=tf.constant_initializer(0.0))
conv = tf.nn.conv2d(expanded,
W, strides=[1,1,1,1],
padding="VALID", name="conv")
activation = tf.nn.relu(tf.nn.bias_add(conv, b), "activation")
mot = tf.reduce_max(activation, [1], keep_dims=True)
# Add back in the dropout
mots.append(mot)
wsz_all = wsz * len(mots)
combine = tf.reshape(tf.concat(values=mots, axis=3), [-1, wsz_all])
joined = highway_conns(combine, wsz_all, 1, reuse)
# joined = skip_conns(combine, wsz_all, 1, reuse)
return joined
def shared_char_word(Wch, xch_i, filtsz, char_dsz, wsz, reuse):
with tf.variable_scope("SharedCharWord", reuse=reuse):
# Zeropad the letters out to half the max filter size, to account for
# wide convolution. This way we don't have to explicitly pad the
# data upfront, which means our Y sequences can be assumed not to
# start with zeros
mxfiltsz = np.max(filtsz)
halffiltsz = int(math.floor(mxfiltsz / 2))
zeropad = tf.pad(xch_i, [[0,0], [halffiltsz, halffiltsz]], "CONSTANT")
cembed = tf.nn.embedding_lookup(Wch, zeropad)
if len(filtsz) == 0 or filtsz[0] == 0:
return tf.reduce_sum(cembed, [1])
return char_word_conv_embeddings(cembed, filtsz, char_dsz, wsz, reuse)
def tensor2seq(tensor):
return tf.unstack(tf.transpose(tensor, perm=[1, 0, 2]))
def seq2tensor(sequence):
return tf.transpose(tf.stack(sequence), perm=[1, 0, 2])
| gpl-3.0 | 1,162,317,117,842,288,600 | 41.514507 | 182 | 0.553321 | false |
tomasdubec/openstack-cinder | cinder/wsgi.py | 1 | 16242 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for working with WSGI servers."""
import errno
import os
import socket
import ssl
import sys
import time
import eventlet
import eventlet.wsgi
import greenlet
from oslo.config import cfg
from paste import deploy
import routes.middleware
import webob.dec
import webob.exc
from cinder import exception
from cinder import flags
from cinder.openstack.common import log as logging
from cinder import utils
socket_opts = [
cfg.IntOpt('backlog',
default=4096,
help="Number of backlog requests to configure the socket with"),
cfg.IntOpt('tcp_keepidle',
default=600,
help="Sets the value of TCP_KEEPIDLE in seconds for each "
"server socket. Not supported on OS X."),
cfg.StrOpt('ssl_ca_file',
default=None,
help="CA certificate file to use to verify "
"connecting clients"),
cfg.StrOpt('ssl_cert_file',
default=None,
help="Certificate file to use when starting "
"the server securely"),
cfg.StrOpt('ssl_key_file',
default=None,
help="Private key file to use when starting "
"the server securely"),
]
CONF = cfg.CONF
CONF.register_opts(socket_opts)
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
class Server(object):
"""Server class to manage a WSGI server, serving a WSGI application."""
default_pool_size = 1000
def __init__(self, name, app, host=None, port=None, pool_size=None,
protocol=eventlet.wsgi.HttpProtocol):
"""Initialize, but do not start, a WSGI server.
:param name: Pretty name for logging.
:param app: The WSGI application to serve.
:param host: IP address to serve the application.
:param port: Port number to server the application.
:param pool_size: Maximum number of eventlets to spawn concurrently.
:returns: None
"""
self.name = name
self.app = app
self._host = host or "0.0.0.0"
self._port = port or 0
self._server = None
self._socket = None
self._protocol = protocol
self._pool = eventlet.GreenPool(pool_size or self.default_pool_size)
self._logger = logging.getLogger("eventlet.wsgi.server")
self._wsgi_logger = logging.WritableLogger(self._logger)
def _get_socket(self, host, port, backlog):
bind_addr = (host, port)
# TODO(dims): eventlet's green dns/socket module does not actually
# support IPv6 in getaddrinfo(). We need to get around this in the
# future or monitor upstream for a fix
try:
info = socket.getaddrinfo(bind_addr[0],
bind_addr[1],
socket.AF_UNSPEC,
socket.SOCK_STREAM)[0]
family = info[0]
bind_addr = info[-1]
except Exception:
family = socket.AF_INET
cert_file = CONF.ssl_cert_file
key_file = CONF.ssl_key_file
ca_file = CONF.ssl_ca_file
use_ssl = cert_file or key_file
if cert_file and not os.path.exists(cert_file):
raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)
if ca_file and not os.path.exists(ca_file):
raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)
if key_file and not os.path.exists(key_file):
raise RuntimeError(_("Unable to find key_file : %s") % key_file)
if use_ssl and (not cert_file or not key_file):
raise RuntimeError(_("When running server in SSL mode, you must "
"specify both a cert_file and key_file "
"option value in your configuration file"))
def wrap_ssl(sock):
ssl_kwargs = {
'server_side': True,
'certfile': cert_file,
'keyfile': key_file,
'cert_reqs': ssl.CERT_NONE,
}
if CONF.ssl_ca_file:
ssl_kwargs['ca_certs'] = ca_file
ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
return ssl.wrap_socket(sock, **ssl_kwargs)
sock = None
retry_until = time.time() + 30
while not sock and time.time() < retry_until:
try:
sock = eventlet.listen(bind_addr,
backlog=backlog,
family=family)
if use_ssl:
sock = wrap_ssl(sock)
except socket.error, err:
if err.args[0] != errno.EADDRINUSE:
raise
eventlet.sleep(0.1)
if not sock:
raise RuntimeError(_("Could not bind to %(host)s:%(port)s "
"after trying for 30 seconds") %
{'host': host, 'port': port})
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# sockets can hang around forever without keepalive
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
# This option isn't available in the OS X version of eventlet
if hasattr(socket, 'TCP_KEEPIDLE'):
sock.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPIDLE,
CONF.tcp_keepidle)
return sock
def _start(self):
"""Run the blocking eventlet WSGI server.
:returns: None
"""
eventlet.wsgi.server(self._socket,
self.app,
protocol=self._protocol,
custom_pool=self._pool,
log=self._wsgi_logger)
def start(self, backlog=128):
"""Start serving a WSGI application.
:param backlog: Maximum number of queued connections.
:returns: None
:raises: cinder.exception.InvalidInput
"""
if backlog < 1:
raise exception.InvalidInput(
reason='The backlog must be more than 1')
self._socket = self._get_socket(self._host,
self._port,
backlog=backlog)
self._server = eventlet.spawn(self._start)
(self._host, self._port) = self._socket.getsockname()[0:2]
LOG.info(_("Started %(name)s on %(_host)s:%(_port)s") % self.__dict__)
@property
def host(self):
return self._host
@property
def port(self):
return self._port
def stop(self):
"""Stop this server.
This is not a very nice action, as currently the method by which a
server is stopped is by killing its eventlet.
:returns: None
"""
LOG.info(_("Stopping WSGI server."))
self._server.kill()
def wait(self):
"""Block, until the server has stopped.
Waits on the server's eventlet to finish, then returns.
:returns: None
"""
try:
self._server.wait()
except greenlet.GreenletExit:
LOG.info(_("WSGI server has stopped."))
class Request(webob.Request):
pass
class Application(object):
"""Base WSGI application wrapper. Subclasses need to implement __call__."""
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [app:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[app:wadl]
latest_version = 1.3
paste.app_factory = cinder.api.fancy_api:Wadl.factory
which would result in a call to the `Wadl` class as
import cinder.api.fancy_api
fancy_api.Wadl(latest_version='1.3')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
return cls(**local_config)
def __call__(self, environ, start_response):
r"""Subclasses will probably want to implement __call__ like this:
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
# Any of the following objects work as responses:
# Option 1: simple string
res = 'message\n'
# Option 2: a nicely formatted HTTP exception page
res = exc.HTTPForbidden(detail='Nice try')
# Option 3: a webob Response object (in case you need to play with
# headers, or you want to be treated like an iterable, or or or)
res = Response();
res.app_iter = open('somefile')
# Option 4: any wsgi app to be run next
res = self.application
# Option 5: you can get a Response object for a wsgi app, too, to
# play with headers etc
res = req.get_response(self.application)
# You can then just return your response...
return res
# ... or set req.response and return None.
req.response = res
See the end of http://pythonpaste.org/webob/modules/dec.html
for more info.
"""
raise NotImplementedError(_('You must implement __call__'))
class Middleware(Application):
"""Base WSGI middleware.
These classes require an application to be
initialized that will be called next. By default the middleware will
simply call its wrapped app, or you can override __call__ to customize its
behavior.
"""
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [filter:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[filter:analytics]
redis_host = 127.0.0.1
paste.filter_factory = cinder.api.analytics:Analytics.factory
which would result in a call to the `Analytics` class as
import cinder.api.analytics
analytics.Analytics(app_from_paste, redis_host='127.0.0.1')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
def _factory(app):
return cls(app, **local_config)
return _factory
def __init__(self, application):
self.application = application
def process_request(self, req):
"""Called on each request.
If this returns None, the next application down the stack will be
executed. If it returns a response then that response will be returned
and execution will stop here.
"""
return None
def process_response(self, response):
"""Do whatever you'd like to the response."""
return response
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
response = self.process_request(req)
if response:
return response
response = req.get_response(self.application)
return self.process_response(response)
class Debug(Middleware):
"""Helper class for debugging a WSGI application.
Can be inserted into any WSGI application chain to get information
about the request and response.
"""
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
print ('*' * 40) + ' REQUEST ENVIRON'
for key, value in req.environ.items():
print key, '=', value
print
resp = req.get_response(self.application)
print ('*' * 40) + ' RESPONSE HEADERS'
for (key, value) in resp.headers.iteritems():
print key, '=', value
print
resp.app_iter = self.print_generator(resp.app_iter)
return resp
@staticmethod
def print_generator(app_iter):
"""Iterator that prints the contents of a wrapper string."""
print ('*' * 40) + ' BODY'
for part in app_iter:
sys.stdout.write(part)
sys.stdout.flush()
yield part
print
class Router(object):
"""WSGI middleware that maps incoming requests to WSGI apps."""
def __init__(self, mapper):
"""Create a router for the given routes.Mapper.
Each route in `mapper` must specify a 'controller', which is a
WSGI app to call. You'll probably want to specify an 'action' as
well and have your controller be an object that can route
the request to the action-specific method.
Examples:
mapper = routes.Mapper()
sc = ServerController()
# Explicit mapping of one route to a controller+action
mapper.connect(None, '/svrlist', controller=sc, action='list')
# Actions are all implicitly defined
mapper.resource('server', 'servers', controller=sc)
# Pointing to an arbitrary WSGI app. You can specify the
# {path_info:.*} parameter so the target app can be handed just that
# section of the URL.
mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp())
"""
self.map = mapper
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
self.map)
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Route the incoming request to a controller based on self.map.
If no match, return a 404.
"""
return self._router
@staticmethod
@webob.dec.wsgify(RequestClass=Request)
def _dispatch(req):
"""Dispatch the request to the appropriate controller.
Called by self._router after matching the incoming request to a route
and putting the information into req.environ. Either returns 404
or the routed WSGI app's response.
"""
match = req.environ['wsgiorg.routing_args'][1]
if not match:
return webob.exc.HTTPNotFound()
app = match['controller']
return app
class Loader(object):
"""Used to load WSGI applications from paste configurations."""
def __init__(self, config_path=None):
"""Initialize the loader, and attempt to find the config.
:param config_path: Full or relative path to the paste config.
:returns: None
"""
config_path = config_path or FLAGS.api_paste_config
self.config_path = utils.find_config(config_path)
def load_app(self, name):
"""Return the paste URLMap wrapped WSGI application.
:param name: Name of the application to load.
:returns: Paste URLMap object wrapping the requested application.
:raises: `cinder.exception.PasteAppNotFound`
"""
try:
return deploy.loadapp("config:%s" % self.config_path, name=name)
except LookupError as err:
LOG.error(err)
raise exception.PasteAppNotFound(name=name, path=self.config_path)
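# A hypothetical way this loader is used elsewhere in the service (the app name
# and port are illustrative, not guaranteed by this module):
#
#   loader = Loader()                      # resolves api_paste_config via flags
#   app = loader.load_app('osapi_volume')  # paste URLMap-wrapped WSGI app
#   Server("osapi_volume", app, port=8776).start()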
| apache-2.0 | -119,044,688,652,326,480 | 31.945233 | 79 | 0.588536 | false |
nlevitt/brozzler | vagrant/vagrant-brozzler-new-site.py | 1 | 3061 | #!/usr/bin/env python
'''
vagrant-brozzler-new-site.py - runs brozzler-new-site inside the vagrant vm to
queue a site for your vagrant brozzler deployment.
Fills in the --proxy option automatically. Some other options are passed
through.
This is a standalone script with no dependencies other than python, and should
work with python 2.7 or python 3.2+. The only reason it's not a bash script is
so we can use the argparse library.
Copyright (C) 2016 Internet Archive
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
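# Example invocation (the seed URL and option values are illustrative):
#
#   ./vagrant-brozzler-new-site.py --time-limit=3600 --ignore-robots \
#           http://example.com/
#
# The script cds next to its Vagrantfile, then runs brozzler-new-site inside
# the VM via "vagrant ssh", filling in --proxy=localhost:8000 automatically.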
import sys
import os
import argparse
import subprocess
try:
from shlex import quote
except:
from pipes import quote
def main(argv=[]):
arg_parser = argparse.ArgumentParser(prog=os.path.basename(argv[0]))
arg_parser.add_argument('seed', metavar='SEED', help='seed url')
arg_parser.add_argument(
'--time-limit', dest='time_limit', default=None,
help='time limit in seconds for this site')
arg_parser.add_argument(
'--ignore-robots', dest='ignore_robots', action='store_true',
help='ignore robots.txt for this site')
arg_parser.add_argument(
'--warcprox-meta', dest='warcprox_meta',
help=(
'Warcprox-Meta http request header to send with each request; '
'must be a json blob, ignored unless warcprox features are '
'enabled'))
arg_parser.add_argument(
'-q', '--quiet', dest='quiet', action='store_true')
arg_parser.add_argument(
'-v', '--verbose', dest='verbose', action='store_true')
args = arg_parser.parse_args(args=argv[1:])
options = []
if args.time_limit:
options.append('--time-limit=%s' % args.time_limit)
if args.ignore_robots:
options.append('--ignore-robots')
if args.warcprox_meta:
# I think this shell escaping is correct?
options.append(
'--warcprox-meta=%s' % quote(args.warcprox_meta))
if args.quiet:
options.append('--quiet')
if args.verbose:
options.append('--verbose')
# cd to path with Vagrantfile so "vagrant ssh" knows what to do
os.chdir(os.path.dirname(__file__))
cmd = (
'PYTHONPATH=/home/vagrant/brozzler-ve34/lib/python3.4/site-packages '
'/home/vagrant/brozzler-ve34/bin/python '
'/home/vagrant/brozzler-ve34/bin/brozzler-new-site '
'--proxy=localhost:8000 %s %s') % (
' '.join(options), args.seed)
subprocess.call(['vagrant', 'ssh', '--', cmd])
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 | -5,619,217,285,461,164,000 | 34.593023 | 79 | 0.656648 | false |
jacor-/TuentiChallenge5 | problem16/snippet.py | 1 | 3091 | """
To solve this problem we have used The Full Whiskas Model example from the
PuLP package for Python. I have no clue about linear optimization... so this
package has been infinitely helpful. The code does not require much
explanation and there is not much time remaining in the contest... so I won't
comment anything else!
"""
# Import PuLP modeler functions
from pulp import *
# Creates a list of the Ingredients
import numpy
import fileinput
inp = fileinput.input()
num_cases = int(inp.next());
for case in range(num_cases):
arboles, prediccio, lenadores = map(int,inp.next()[:-1].split(" ")[:3])
Treball_maxim = []
Work_required = []
for jj in range(lenadores):
work_list = [int(i) for i in inp.next()[:-1].split(" ") if len(i) > 0]
Treball_maxim.append(work_list[0])
Work_required.append(work_list[1:])
Dedicacio = []
for arbolito in range(arboles):
for lenador in range(lenadores):
Dedicacio.append("%d:%d"%(arbolito, lenador))
ArbolAssolible = []
for lenador in range(lenadores):
ArbolAssolible.append([])
for arbol in range(arboles):
ArbolAssolible[-1].append(float(Treball_maxim[lenador])/Work_required[lenador][arbol])
prob = LpProblem("My paranoia problem", LpMinimize)
ingredient_vars = LpVariable.dicts("Dedicacio ",Dedicacio,lowBound=0.,upBound=1.)#,0)
main_cost = []
    ### The total cost looks good...
for lenador in range(lenadores):
main_cost.append(lpSum([ingredient_vars["%d:%d"%(arbolito, lenador)] for arbolito in range(arboles)]) *Treball_maxim[lenador])
prob += lpSum(main_cost)#, "Total Cost of Ingredients per can"
for arbolito in range(arboles):
for lenador in range(lenadores):
prob += lpSum([ingredient_vars["%d:%d"%(arbolito, lenador)] * ArbolAssolible[lenador][arbolito] ]) <= 1, ' garantizando que no curro por encima de mis posibilidades %d %d menor que uno' % (arbolito, lenador)
for lenador in range(lenadores):
prob += lpSum([ingredient_vars["%d:%d"%(arbolito, lenador)] for arbolito in range(arboles)]) <= 1
for arbol in range(arboles):
prob += lpSum([ingredient_vars["%d:%d"%(arbol, lenador)]*ArbolAssolible[lenador][arbol] for lenador in range(lenadores)]) == 1, ' totalidad arbol %d cortado' % arbol
for arbolito in range(arboles):
for lenador in range(lenadores):
prob += lpSum([ingredient_vars["%d:%d"%(arbolito, lenador)]]) >= 0, ' garantizando dedicacion %d %d positivo' % (arbolito, lenador)
# The problem data is written to an .lp file
prob.writeLP("WhiskasModel2.lp")
# The problem is solved using PuLP's choice of Solver
prob.solve()
if LpStatus[prob.status] == "Infeasible":
print "Test case #%d: IMPOSSIBLE" % (case+1)
elif numpy.around(prediccio,2) < numpy.around(value(prob.objective),2):
print "Test case #%d: %0.2f" % (case+1, value(prob.objective)-prediccio)
else:
print "Test case #%d: RIGHT" % (case+1)
| mit | 1,703,887,847,516,692,000 | 38.126582 | 219 | 0.650922 | false |
kdrone/crazyflie-python-client | build/lib.linux-i686-2.7/cfclient/ui/main.py | 1 | 21200 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
The main file for the Crazyflie control application.
"""
__author__ = 'Bitcraze AB'
__all__ = ['MainUI']
import sys
import logging
logger = logging.getLogger(__name__)
from PyQt4 import QtGui, uic
from PyQt4.QtCore import pyqtSignal, Qt, pyqtSlot, QDir, QUrl
from PyQt4.QtGui import QLabel, QActionGroup, QMessageBox, QAction, QDesktopServices
from dialogs.connectiondialogue import ConnectDialogue
from dialogs.inputconfigdialogue import InputConfigDialogue
from dialogs.cf2config import Cf2ConfigDialog
from dialogs.cf1config import Cf1ConfigDialog
from cflib.crazyflie import Crazyflie
from dialogs.logconfigdialogue import LogConfigDialogue
from cfclient.utils.input import JoystickReader
from cfclient.utils.guiconfig import GuiConfig
from cfclient.utils.logconfigreader import LogConfigReader
from cfclient.utils.config_manager import ConfigManager
import cfclient.ui.toolboxes
import cfclient.ui.tabs
import cflib.crtp
from cflib.crazyflie.log import Log, LogVariable, LogConfig
from cfclient.ui.dialogs.bootloader import BootloaderDialog
from cfclient.ui.dialogs.about import AboutDialog
(main_window_class,
main_windows_base_class) = (uic.loadUiType(sys.path[0] +
'/cfclient/ui/main.ui'))
class MyDockWidget(QtGui.QDockWidget):
closed = pyqtSignal()
def closeEvent(self, event):
super(MyDockWidget, self).closeEvent(event)
self.closed.emit()
class UIState:
DISCONNECTED = 0
CONNECTING = 1
CONNECTED = 2
class MainUI(QtGui.QMainWindow, main_window_class):
connectionLostSignal = pyqtSignal(str, str)
connectionInitiatedSignal = pyqtSignal(str)
batteryUpdatedSignal = pyqtSignal(int, object, object)
connectionDoneSignal = pyqtSignal(str)
connectionFailedSignal = pyqtSignal(str, str)
disconnectedSignal = pyqtSignal(str)
linkQualitySignal = pyqtSignal(int)
_input_device_error_signal = pyqtSignal(str)
_input_discovery_signal = pyqtSignal(object)
_log_error_signal = pyqtSignal(object, str)
def __init__(self, *args):
super(MainUI, self).__init__(*args)
self.setupUi(self)
self.cf = Crazyflie(ro_cache=sys.path[0] + "/cflib/cache",
rw_cache=sys.path[1] + "/cache")
cflib.crtp.init_drivers(enable_debug_driver=GuiConfig()
.get("enable_debug_driver"))
# Create the connection dialogue
self.connectDialogue = ConnectDialogue()
# Create and start the Input Reader
self._statusbar_label = QLabel("Loading device and configuration.")
self.statusBar().addWidget(self._statusbar_label)
self.joystickReader = JoystickReader()
self._active_device = ""
self.configGroup = QActionGroup(self._menu_mappings, exclusive=True)
self._load_input_data()
self._update_input
ConfigManager().conf_needs_reload.add_callback(self._reload_configs)
# Connections for the Connect Dialogue
self.connectDialogue.requestConnectionSignal.connect(self.cf.open_link)
self.connectionDoneSignal.connect(self.connectionDone)
self.cf.connection_failed.add_callback(self.connectionFailedSignal.emit)
self.connectionFailedSignal.connect(self.connectionFailed)
self._input_device_error_signal.connect(self.inputDeviceError)
self.joystickReader.device_error.add_callback(
self._input_device_error_signal.emit)
self._input_discovery_signal.connect(self.device_discovery)
self.joystickReader.device_discovery.add_callback(
self._input_discovery_signal.emit)
# Connect UI signals
self.menuItemConnect.triggered.connect(self.connectButtonClicked)
self.logConfigAction.triggered.connect(self.doLogConfigDialogue)
self.connectButton.clicked.connect(self.connectButtonClicked)
self.quickConnectButton.clicked.connect(self.quickConnect)
self.menuItemQuickConnect.triggered.connect(self.quickConnect)
self.menuItemConfInputDevice.triggered.connect(self.configInputDevice)
self.menuItemExit.triggered.connect(self.closeAppRequest)
self.batteryUpdatedSignal.connect(self.updateBatteryVoltage)
self._menuitem_rescandevices.triggered.connect(self._rescan_devices)
self._menuItem_openconfigfolder.triggered.connect(self._open_config_folder)
self._auto_reconnect_enabled = GuiConfig().get("auto_reconnect")
self.autoReconnectCheckBox.toggled.connect(
self._auto_reconnect_changed)
self.autoReconnectCheckBox.setChecked(GuiConfig().get("auto_reconnect"))
# Do not queue data from the controller output to the Crazyflie wrapper
# to avoid latency
#self.joystickReader.sendControlSetpointSignal.connect(
# self.cf.commander.send_setpoint,
# Qt.DirectConnection)
self.joystickReader.input_updated.add_callback(
self.cf.commander.send_setpoint)
# Connection callbacks and signal wrappers for UI protection
self.cf.connected.add_callback(
self.connectionDoneSignal.emit)
self.connectionDoneSignal.connect(self.connectionDone)
self.cf.disconnected.add_callback(self.disconnectedSignal.emit)
self.disconnectedSignal.connect(
lambda linkURI: self.setUIState(UIState.DISCONNECTED,
linkURI))
self.cf.connection_lost.add_callback(self.connectionLostSignal.emit)
self.connectionLostSignal.connect(self.connectionLost)
self.cf.connection_requested.add_callback(
self.connectionInitiatedSignal.emit)
self.connectionInitiatedSignal.connect(
lambda linkURI: self.setUIState(UIState.CONNECTING,
linkURI))
self._log_error_signal.connect(self._logging_error)
# Connect link quality feedback
self.cf.link_quality_updated.add_callback(self.linkQualitySignal.emit)
self.linkQualitySignal.connect(
lambda percentage: self.linkQualityBar.setValue(percentage))
        # Set UI state to disconnected by default
self.setUIState(UIState.DISCONNECTED)
# Parse the log configuration files
self.logConfigReader = LogConfigReader(self.cf)
# Add things to helper so tabs can access it
cfclient.ui.pluginhelper.cf = self.cf
cfclient.ui.pluginhelper.inputDeviceReader = self.joystickReader
cfclient.ui.pluginhelper.logConfigReader = self.logConfigReader
self.logConfigDialogue = LogConfigDialogue(cfclient.ui.pluginhelper)
self._bootloader_dialog = BootloaderDialog(cfclient.ui.pluginhelper)
self._cf2config_dialog = Cf2ConfigDialog(cfclient.ui.pluginhelper)
self._cf1config_dialog = Cf1ConfigDialog(cfclient.ui.pluginhelper)
self.menuItemBootloader.triggered.connect(self._bootloader_dialog.show)
self._about_dialog = AboutDialog(cfclient.ui.pluginhelper)
self.menuItemAbout.triggered.connect(self._about_dialog.show)
self._menu_cf2_config.triggered.connect(self._cf2config_dialog.show)
self._menu_cf1_config.triggered.connect(self._cf1config_dialog.show)
# Loading toolboxes (A bit of magic for a lot of automatic)
self.toolboxes = []
self.toolboxesMenuItem.setMenu(QtGui.QMenu())
for t_class in cfclient.ui.toolboxes.toolboxes:
toolbox = t_class(cfclient.ui.pluginhelper)
dockToolbox = MyDockWidget(toolbox.getName())
dockToolbox.setWidget(toolbox)
self.toolboxes += [dockToolbox, ]
# Add menu item for the toolbox
item = QtGui.QAction(toolbox.getName(), self)
item.setCheckable(True)
item.triggered.connect(self.toggleToolbox)
self.toolboxesMenuItem.menu().addAction(item)
dockToolbox.closed.connect(lambda: self.toggleToolbox(False))
# Setup some introspection
item.dockToolbox = dockToolbox
item.menuItem = item
dockToolbox.dockToolbox = dockToolbox
dockToolbox.menuItem = item
# Load and connect tabs
self.tabsMenuItem.setMenu(QtGui.QMenu())
tabItems = {}
self.loadedTabs = []
for tabClass in cfclient.ui.tabs.available:
tab = tabClass(self.tabs, cfclient.ui.pluginhelper)
item = QtGui.QAction(tab.getMenuName(), self)
item.setCheckable(True)
item.toggled.connect(tab.toggleVisibility)
self.tabsMenuItem.menu().addAction(item)
tabItems[tab.getTabName()] = item
self.loadedTabs.append(tab)
if not tab.enabled:
item.setEnabled(False)
# First instantiate all tabs and then open them in the correct order
try:
for tName in GuiConfig().get("open_tabs").split(","):
t = tabItems[tName]
if (t != None and t.isEnabled()):
                    # Toggle through menu so it's also marked as open there
t.toggle()
except Exception as e:
logger.warning("Exception while opening tabs [%s]", e)
def setUIState(self, newState, linkURI=""):
self.uiState = newState
if (newState == UIState.DISCONNECTED):
self.setWindowTitle("HANSEI Not connected")
self.menuItemConnect.setText("Connect to Crazyflie")
self.connectButton.setText("Connect")
self.menuItemQuickConnect.setEnabled(True)
self.batteryBar.setValue(3000)
self._menu_cf2_config.setEnabled(False)
self.linkQualityBar.setValue(0)
self.menuItemBootloader.setEnabled(True)
self.logConfigAction.setEnabled(False)
if (len(GuiConfig().get("link_uri")) > 0):
self.quickConnectButton.setEnabled(True)
if (newState == UIState.CONNECTED):
s = "Connected on %s" % linkURI
self.setWindowTitle(s)
self.menuItemConnect.setText("Disconnect")
self.connectButton.setText("Disconnect")
self.logConfigAction.setEnabled(True)
self._menu_cf2_config.setEnabled(True)
if (newState == UIState.CONNECTING):
s = "Connecting to %s ..." % linkURI
self.setWindowTitle(s)
self.menuItemConnect.setText("Cancel")
self.connectButton.setText("Cancel")
self.quickConnectButton.setEnabled(False)
self.menuItemBootloader.setEnabled(False)
self.menuItemQuickConnect.setEnabled(False)
@pyqtSlot(bool)
def toggleToolbox(self, display):
menuItem = self.sender().menuItem
dockToolbox = self.sender().dockToolbox
if display and not dockToolbox.isVisible():
dockToolbox.widget().enable()
self.addDockWidget(dockToolbox.widget().preferedDockArea(),
dockToolbox)
dockToolbox.show()
elif not display:
dockToolbox.widget().disable()
self.removeDockWidget(dockToolbox)
dockToolbox.hide()
menuItem.setChecked(False)
def _rescan_devices(self):
self._statusbar_label.setText("No inputdevice connected!")
self._menu_devices.clear()
self._active_device = ""
self.joystickReader.stop_input()
for c in self._menu_mappings.actions():
c.setEnabled(False)
devs = self.joystickReader.getAvailableDevices()
if (len(devs) > 0):
self.device_discovery(devs)
def configInputDevice(self):
self.inputConfig = InputConfigDialogue(self.joystickReader)
self.inputConfig.show()
def _auto_reconnect_changed(self, checked):
self._auto_reconnect_enabled = checked
GuiConfig().set("auto_reconnect", checked)
logger.info("Auto reconnect enabled: %s", checked)
def doLogConfigDialogue(self):
self.logConfigDialogue.show()
def updateBatteryVoltage(self, timestamp, data, logconf):
self.batteryBar.setValue(int(data["pm.vbat"] * 1000))
def connectionDone(self, linkURI):
self.setUIState(UIState.CONNECTED, linkURI)
GuiConfig().set("link_uri", linkURI)
lg = LogConfig("Battery", 1000)
lg.add_variable("pm.vbat", "float")
self.cf.log.add_config(lg)
if lg.valid:
lg.data_received_cb.add_callback(self.batteryUpdatedSignal.emit)
lg.error_cb.add_callback(self._log_error_signal.emit)
lg.start()
else:
logger.warning("Could not setup loggingblock!")
def _logging_error(self, log_conf, msg):
QMessageBox.about(self, "Log error", "Error when starting log config"
" [%s]: %s" % (log_conf.name, msg))
def connectionLost(self, linkURI, msg):
if not self._auto_reconnect_enabled:
if (self.isActiveWindow()):
warningCaption = "Communication failure"
error = "Connection lost to %s: %s" % (linkURI, msg)
QMessageBox.critical(self, warningCaption, error)
self.setUIState(UIState.DISCONNECTED, linkURI)
else:
self.quickConnect()
def connectionFailed(self, linkURI, error):
if not self._auto_reconnect_enabled:
msg = "Failed to connect on %s: %s" % (linkURI, error)
warningCaption = "Communication failure"
QMessageBox.critical(self, warningCaption, msg)
self.setUIState(UIState.DISCONNECTED, linkURI)
else:
self.quickConnect()
def closeEvent(self, event):
self.hide()
self.cf.close_link()
GuiConfig().save_file()
def connectButtonClicked(self):
if (self.uiState == UIState.CONNECTED):
self.cf.close_link()
elif (self.uiState == UIState.CONNECTING):
self.cf.close_link()
self.setUIState(UIState.DISCONNECTED)
else:
self.connectDialogue.show()
def inputDeviceError(self, error):
self.cf.close_link()
QMessageBox.critical(self, "Input device error", error)
def _load_input_data(self):
self.joystickReader.stop_input()
# Populate combo box with available input device configurations
for c in ConfigManager().get_list_of_configs():
node = QAction(c,
self._menu_mappings,
checkable=True,
enabled=False)
node.toggled.connect(self._inputconfig_selected)
self.configGroup.addAction(node)
self._menu_mappings.addAction(node)
def _reload_configs(self, newConfigName):
# remove the old actions from the group and the menu
for action in self._menu_mappings.actions():
self.configGroup.removeAction(action)
self._menu_mappings.clear()
# reload the conf files, and populate the menu
self._load_input_data()
self._update_input(self._active_device, newConfigName)
def _update_input(self, device="", config=""):
self.joystickReader.stop_input()
self._active_config = str(config)
self._active_device = str(device)
GuiConfig().set("input_device", self._active_device)
GuiConfig().get(
"device_config_mapping"
)[self._active_device] = self._active_config
self.joystickReader.start_input(self._active_device,
self._active_config)
# update the checked state of the menu items
for c in self._menu_mappings.actions():
c.setEnabled(True)
if c.text() == self._active_config:
c.setChecked(True)
for c in self._menu_devices.actions():
c.setEnabled(True)
if c.text() == self._active_device:
c.setChecked(True)
# update label
if device == "" and config == "":
self._statusbar_label.setText("No input device selected")
elif config == "":
self._statusbar_label.setText("Using [%s] - "
"No input config selected" %
(self._active_device))
else:
self._statusbar_label.setText("Using [%s] with config [%s]" %
(self._active_device,
self._active_config))
def _inputdevice_selected(self, checked):
if (not checked):
return
self.joystickReader.stop_input()
sender = self.sender()
self._active_device = sender.text()
device_config_mapping = GuiConfig().get("device_config_mapping")
if (self._active_device in device_config_mapping.keys()):
self._current_input_config = device_config_mapping[
str(self._active_device)]
else:
self._current_input_config = self._menu_mappings.actions()[0].text()
GuiConfig().set("input_device", str(self._active_device))
for c in self._menu_mappings.actions():
if (c.text() == self._current_input_config):
c.setChecked(True)
self.joystickReader.start_input(str(sender.text()),
self._current_input_config)
self._statusbar_label.setText("Using [%s] with config [%s]" % (
self._active_device,
self._current_input_config))
def _inputconfig_selected(self, checked):
if (not checked):
return
self._update_input(self._active_device, self.sender().text())
def device_discovery(self, devs):
group = QActionGroup(self._menu_devices, exclusive=True)
for d in devs:
node = QAction(d["name"], self._menu_devices, checkable=True)
node.toggled.connect(self._inputdevice_selected)
group.addAction(node)
self._menu_devices.addAction(node)
if (d["name"] == GuiConfig().get("input_device")):
self._active_device = d["name"]
if (len(self._active_device) == 0):
self._active_device = self._menu_devices.actions()[0].text()
device_config_mapping = GuiConfig().get("device_config_mapping")
if (device_config_mapping):
if (self._active_device in device_config_mapping.keys()):
self._current_input_config = device_config_mapping[
str(self._active_device)]
else:
self._current_input_config = self._menu_mappings.actions()[0].text()
else:
self._current_input_config = self._menu_mappings.actions()[0].text()
# Now we know what device to use and what mapping, trigger the events
# to change the menus and start the input
for c in self._menu_mappings.actions():
c.setEnabled(True)
if (c.text() == self._current_input_config):
c.setChecked(True)
for c in self._menu_devices.actions():
if (c.text() == self._active_device):
c.setChecked(True)
def quickConnect(self):
try:
self.cf.open_link(GuiConfig().get("link_uri"))
except KeyError:
self.cf.open_link("")
def _open_config_folder(self):
QDesktopServices.openUrl(QUrl("file:///" + QDir.toNativeSeparators(sys.path[1])))
def closeAppRequest(self):
self.close()
sys.exit(0)
| gpl-2.0 | 9,170,019,072,597,132,000 | 40.48728 | 89 | 0.614717 | false |
puttarajubr/commcare-hq | corehq/apps/builds/models.py | 1 | 7284 | from datetime import datetime
from zipfile import ZipFile
from corehq.apps.app_manager.const import APP_V1, APP_V2
from couchdbkit.exceptions import ResourceNotFound, BadValueError
from dimagi.ext.couchdbkit import *
from corehq.apps.builds.fixtures import commcare_build_config
from corehq.apps.builds.jadjar import JadJar
from corehq.util.quickcache import quickcache
class SemanticVersionProperty(StringProperty):
def validate(self, value, required=True):
super(SemanticVersionProperty, self).validate(value, required)
try:
major, minor, _ = value.split('.')
int(major)
int(minor)
except Exception:
raise BadValueError("Build version %r does not comply with the x.y.z schema" % value)
return value
class CommCareBuild(Document):
"""
#python manage.py shell
#>>> from corehq.apps.builds.models import CommCareBuild
#>>> build = CommCareBuild.create_from_zip('/Users/droberts/Desktop/zip/7106.zip', '1.2.dev', 7106)
"""
build_number = IntegerProperty()
version = SemanticVersionProperty()
time = DateTimeProperty()
def put_file(self, payload, path, filename=None):
"""
Add an attachment to the build (useful for constructing the build)
payload should be a file-like object
filename should be something like "Nokia/S40-generic/CommCare.jar"
"""
if filename:
path = '/'.join([path, filename])
content_type = {
'jad': 'text/vnd.sun.j2me.app-descriptor',
'jar': 'application/java-archive',
}.get(path.split('.')[-1])
self.put_attachment(payload, path, content_type)
def fetch_file(self, path, filename=None):
if filename:
path = '/'.join([path, filename])
return self.fetch_attachment(path)
def get_jadjar(self, path):
"""
build.get_jadjar("Nokia/S40-generic")
"""
try:
jad = self.fetch_file(path, "CommCare.jad")
except ResourceNotFound:
jad = None
return JadJar(
jad=jad,
jar=self.fetch_file(path, "CommCare.jar"),
version=self.version,
build_number=self.build_number
)
@classmethod
def create_from_zip(cls, f, version, build_number):
"""f should be a file-like object or a path to a zipfile"""
self = cls(build_number=build_number, version=version, time=datetime.utcnow())
self.save()
with ZipFile(f) as z:
try:
for name in z.namelist():
path = name.split('/')
if path[0] == "dist" and path[-1] != "":
path = '/'.join(path[1:])
self.put_file(z.read(name), path)
except:
self.delete()
raise
return self
def minor_release(self):
major, minor, _ = self.version.split('.')
return int(major), int(minor)
def major_release(self):
major, _, _ = self.version.split('.')
return int(major)
@classmethod
def get_build(cls, version, build_number=None, latest=False):
"""
Call as either
CommCareBuild.get_build(version, build_number)
or
CommCareBuild.get_build(version, latest=True)
"""
if latest:
startkey = [version]
else:
build_number = int(build_number)
startkey = [version, build_number]
self = cls.view('builds/all',
startkey=startkey + [{}],
endkey=startkey,
descending=True,
limit=1,
include_docs=True,
reduce=False,
).one()
if not self:
raise KeyError("Can't find build {label}. For instructions on how to add it, see https://github.com/dimagi/commcare-hq/blob/master/corehq/apps/builds/README.md".format(label=BuildSpec(
version=version,
build_number=build_number,
latest=latest
)))
return self
@classmethod
def all_builds(cls):
return cls.view('builds/all', include_docs=True, reduce=False)
class BuildSpec(DocumentSchema):
version = StringProperty()
build_number = IntegerProperty(required=False)
latest = BooleanProperty()
def get_build(self):
if self.latest:
return CommCareBuild.get_build(self.version, latest=True)
else:
return CommCareBuild.get_build(self.version, self.build_number)
def is_null(self):
return not (self.version and (self.build_number or self.latest))
def get_label(self):
if not self.is_null():
fmt = "{self.version} "
fmt += "(latest)" if self.latest else "({self.build_number})"
return fmt.format(self=self)
else:
return None
def __str__(self):
fmt = "{self.version}/"
fmt += "latest" if self.latest else "{self.build_number}"
return fmt.format(self=self)
def to_string(self):
return str(self)
@classmethod
def from_string(cls, string):
version, build_number = string.split('/')
if build_number == "latest":
return cls(version=version, latest=True)
else:
build_number = int(build_number)
return cls(version=version, build_number=build_number)
def minor_release(self):
major, minor, _ = self.version.split('.')
return int(major), int(minor)
def major_release(self):
return self.version.split('.')[0]
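# Editor's illustration (not part of the original module): BuildSpec round-trips
# through its compact string form, e.g.
#
#   BuildSpec.from_string("2.7.0/latest").to_string()   # -> "2.7.0/latest"
#   BuildSpec.from_string("2.7.0/1234").build_number    # -> 1234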
class BuildMenuItem(DocumentSchema):
build = SchemaProperty(BuildSpec)
label = StringProperty(required=False)
superuser_only = BooleanProperty(default=False)
def get_build(self):
return self.build.get_build()
def get_label(self):
return self.label or self.build.get_label()
class CommCareBuildConfig(Document):
_ID = 'config--commcare-builds'
preview = SchemaProperty(BuildSpec)
defaults = SchemaListProperty(BuildSpec)
application_versions = StringListProperty()
menu = SchemaListProperty(BuildMenuItem)
@classmethod
def bootstrap(cls):
config = cls.wrap(commcare_build_config)
config._id = config._ID
config.save()
return config
@classmethod
def clear_local_cache(cls):
cls.fetch.clear(cls)
@classmethod
@quickcache([], timeout=30)
def fetch(cls):
try:
return cls.get(cls._ID)
except ResourceNotFound:
return cls.bootstrap()
def get_default(self, application_version):
i = self.application_versions.index(application_version)
return self.defaults[i]
def get_menu(self, application_version=None):
if application_version:
major = {
APP_V1: '1',
APP_V2: '2',
}[application_version]
return filter(lambda x: x.build.major_release() == major, self.menu)
else:
return self.menu
class BuildRecord(BuildSpec):
signed = BooleanProperty(default=True)
datetime = DateTimeProperty(required=False)
| bsd-3-clause | 1,979,208,567,887,343,900 | 29.734177 | 196 | 0.590472 | false |
DouglasOrr/DeepLearnTute | dlt/data.py | 1 | 11850 | '''Individual character handwriting data loading and preprocessing.
Based on https://archive.ics.uci.edu/ml/datasets/UJI+Pen+Characters+(Version+2)
'''
import re
import click
import itertools as it
import random
import json
import numpy as np
import h5py
import matplotlib.pyplot as plt
def parse_uji(lines):
'''Parse the UJI pen characters dataset text format.
lines -- a sequence of lines, in the UJIv2 dataset format
yields -- (character, strokes), where character is a unicode string,
strokes is a list numpy arrays of shape (npoints, 2).
'''
COMMENT = re.compile(r'//.*$')
WORD = re.compile(r'WORD (.)')
NUMSTROKES = re.compile(r'NUMSTROKES (\d+)')
POINTS = re.compile(r'POINTS (\d+) # ((?:[-0-9]+ ?)+)')
word = None
numstrokes = None
strokes = None
for line in lines:
line = COMMENT.sub('', line).strip()
if line == '':
continue
m = WORD.match(line)
if m is not None:
word = m.group(1)
continue
if word is None:
raise ValueError('Expected WORD...')
m = NUMSTROKES.match(line)
if m is not None:
numstrokes = int(m.group(1))
strokes = []
continue
if numstrokes is None:
raise ValueError('Expected NUMSTROKES...')
m = POINTS.match(line)
if m is not None:
samples = [int(t) for t in m.group(2).split(' ')]
points = list(zip(samples[::2], samples[1::2]))
if len(points) != int(m.group(1)):
raise ValueError(
"Unexpected number of points (expected %d, actual %d)"
% (int(m.group(1)), len(points)))
strokes.append(np.array(points, dtype=np.float32))
if len(strokes) == numstrokes:
yield (word, strokes)
word = numstrokes = strokes = None
continue
raise ValueError("Input not matched '%s'" % line)
def _first(x):
'''Return the first element of an array or tuple.
'''
return x[0]
def _dump_jsonlines(file, data):
'''Dump a dataset of strokes to a file, in the simple JSONlines format.
'''
for ch, strokes in data:
file.write('%s\n' % json.dumps(dict(
target=ch,
strokes=[stroke.tolist() for stroke in strokes])))
def _rotate(angle):
'''Create a 2D rotation matrix for the given angle.
'''
cos = np.cos(angle)
sin = np.sin(angle)
return np.array([[cos, -sin],
[sin, cos]], dtype=np.float32)
def _scale(x, y):
return np.array([[x, 0],
[0, y]], dtype=np.float32)
def _augmentations(strokes,
stretches=[1.2],
rotations=[0.2, 0.4]):
    '''Generate augmented versions of 'strokes' by applying some
rotations & stretching.
strokes -- a multidimensional list [stroke x point x 2], of stroke points
for a single character
yields -- a multidimensional list of the same form of `strokes`
'''
sum_x = sum(sum(x for x, y in stroke) for stroke in strokes)
sum_y = sum(sum(y for x, y in stroke) for stroke in strokes)
n = sum(map(len, strokes))
center = np.array([sum_x, sum_y], dtype=np.float32) / n
norm_strokes = [np.array(stroke) - center for stroke in strokes]
scale_transforms = [_scale(x, 1) for x in ([1] +
stretches +
[1 / s for s in stretches])]
rotate_transforms = [_rotate(a) for a in ([0] +
rotations +
[-r for r in rotations])]
for scale in scale_transforms:
for rotate in rotate_transforms:
tx = np.dot(scale, rotate)
yield [np.dot(trace, tx) + center for trace in norm_strokes]
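# Editor's note: with the default arguments (one stretch factor, two rotation
# angles) each character yields 3 scale transforms x 5 rotation transforms,
# i.e. 15 augmented copies:
#
#   >>> len(list(_augmentations([[(0, 0), (1, 1)]])))
#   15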
def _rem_lo(x):
return x - np.floor(x)
def _rem_hi(x):
return np.ceil(x) - x
def _draw_line(start, end):
'''Enumerate coordinates of an antialiased line, using Xiaolin Wu's line
algorithm.
start -- floating point coordinate pair of line start
end -- floating point coordinate of line end
yields -- (x, y, strength) for an antialiased line between start and end,
where x and y are integer coordinates
'''
x0, y0 = start
x1, y1 = end
# Simplify case - only draw "shallow" lines
if abs(x1 - x0) < abs(y1 - y0):
yield from ((x, y, weight)
for y, x, weight in _draw_line((y0, x0), (y1, x1)))
return
# Transform so we run low-to-high-x
if x1 < x0:
x0, y0, x1, y1 = x1, y1, x0, y0
# Note: we know dy <= dx, so gradient <= 1
gradient = 1.0 if x1 == x0 else (y1 - y0) / (x1 - x0)
# Start of line termination
xend0 = int(np.round(x0))
yend0 = y0 + gradient * (xend0 - x0)
yield (xend0, int(yend0), _rem_hi(yend0) * _rem_hi(x0 + 0.5))
yield (xend0, int(yend0) + 1, _rem_lo(yend0) * _rem_hi(x0 + 0.5))
# End of line termination
xend1 = int(np.round(x1))
yend1 = y1 + gradient * (xend1 - x1)
yield (xend1, int(yend1), _rem_hi(yend1) * _rem_lo(x1 + 0.5))
yield (xend1, int(yend1) + 1, _rem_lo(yend1) * _rem_lo(x1 + 0.5))
# Line drawing loop
y = yend0 + gradient
for x in range(xend0 + 1, xend1):
yield (x, int(y), _rem_hi(y))
yield (x, int(y) + 1, _rem_lo(y))
y += gradient
def render(strokes, size):
'''Render a sequence of strokes to a square numpy array of pixels.
strokes -- a list of float[N x 2] numpy arrays, arbitrary coordinates
size -- the side length of the array to render to
returns -- a float[size x size] containing the image (leading index is y)
'''
x_min = min(s[:, 0].min() for s in strokes)
x_max = max(s[:, 0].max() for s in strokes)
y_min = min(s[:, 1].min() for s in strokes)
y_max = max(s[:, 1].max() for s in strokes)
x_scale = (size - 3) * (1 if x_min == x_max else 1 / (x_max - x_min))
y_scale = (size - 3) * (1 if y_min == y_max else 1 / (y_max - y_min))
scale = min(x_scale, y_scale)
x_off = (size - 1) / (2 * scale) - (x_min + x_max) / 2
y_off = (size - 1) / (2 * scale) - (y_min + y_max) / 2
a = np.zeros((size, size), dtype=np.float32)
for stroke in strokes:
coords = [(scale * (p[0] + x_off), scale * (p[1] + y_off))
for p in stroke]
for start, end in zip(coords, coords[1:]):
for x, y, w in _draw_line(start, end):
a[x, y] = max(a[x, y], w)
return np.swapaxes(a, 0, 1)
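# Editor's sketch (not part of the original module): rendering a single
# diagonal stroke to a 16x16 bitmap.
#
#   >>> stroke = np.array([[0.0, 0.0], [10.0, 10.0]], dtype=np.float32)
#   >>> render([stroke], 16).shape
#   (16, 16)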
class Dataset:
'''An in-memory dataset of images & labels.
dataset.x -- (N x D) array of np.float32, flattened images,
where the y-index is major
dataset.y -- (N) array of np.int labels
dataset.vocab -- (L) array of characters corresponding to the
human-readable labels
'''
def __init__(self, x, y, vocab, width, height):
self.x = x
self.y = y
self.vocab = vocab
self.char_to_index = {ch: i for i, ch in enumerate(vocab)}
self.width = width
self.height = height
def __repr__(self):
return 'Dataset[%d images, size %s, from %d labels]' % (
len(self), self.x.shape[-1], len(self.vocab))
def __len__(self):
return self.x.shape[0]
def find_label(self, char):
return np.where(self.y == self.char_to_index[char])[0]
def show(self, indices=None, limit=64):
if indices is None:
indices = range(limit)
xs = self.x[indices]
ys = self.y[indices]
dim = int(np.ceil(np.sqrt(xs.shape[0])))
plt.figure(figsize=(16, 16))
for plot_index, index, x, y in zip(it.count(1), indices, xs, ys):
plt.subplot(dim, dim, plot_index)
plt.imshow(x.reshape(self.height, self.width), cmap='Greys')
plt.title(r"$y_{%d}$ = %s" % (index, self.vocab[y]), fontsize=14)
plt.gca().set_xticks([])
plt.gca().set_yticks([])
plt.gca().grid(False)
def load_hdf5(path):
'''Load a Dataset object from an HDF5 file.
'''
with h5py.File(path, 'r') as f:
x = f['x'][...]
return Dataset(
x=x.reshape(x.shape[0], -1),
y=f['y'][...].astype(np.int32),
vocab=f['vocab'][...],
height=x.shape[1],
width=x.shape[2]
)
@click.command('read')
@click.argument('source', type=click.File('r'))
@click.argument('train', type=click.File('w'))
@click.argument('valid', type=click.File('w'))
@click.argument('test', type=click.File('w'))
@click.option('-f', '--label-filter',
type=click.STRING, default='.',
help='Regex to filter allowable labels.')
@click.option('--nvalid', type=click.INT, default=10,
help='Number of validation examples per label.')
@click.option('--ntest', type=click.INT, default=10,
help='Number of test examples per label.')
@click.option('--seed', type=click.INT, default=42,
help='Seed for random number generation.')
@click.option('--augment/--no-augment', default=True,
help='Should we use data augmentation (rotation & stretching).')
def cli_read(source, train, valid, test, label_filter,
nvalid, ntest, seed, augment):
'''Generate a JSONlines dataset from UJI pen characters.
'''
random.seed(seed)
# Load & filter
data = parse_uji(source)
label_pattern = re.compile(label_filter)
data = filter(lambda x: label_pattern.match(x[0]) is not None,
data)
# Partition
data = it.groupby(sorted(data, key=_first), _first)
train_data = []
valid_data = []
test_data = []
for char, examples in data:
shuffled_examples = list(examples)
random.shuffle(shuffled_examples)
test_data += shuffled_examples[:ntest]
valid_data += shuffled_examples[ntest:(ntest + nvalid)]
train_data += shuffled_examples[(ntest + nvalid):]
# Augment training data
if augment:
train_data = [(ch, ss)
for ch, strokes in train_data
for ss in _augmentations(strokes)]
# Shuffle
random.shuffle(train_data)
random.shuffle(valid_data)
random.shuffle(test_data)
# Save
_dump_jsonlines(train, train_data)
_dump_jsonlines(valid, valid_data)
_dump_jsonlines(test, test_data)
@click.command('render')
@click.argument('input', type=click.File('r'))
@click.argument('output', type=click.Path(dir_okay=False))
@click.option('-n', '--size', default=16, type=click.INT)
def cli_render(input, output, size):
'''Render a JSONlines dataset to numpy arrays, saved in an HDF5 file.
'''
chars = []
images = []
for line in input:
datum = json.loads(line)
chars.append(datum['target'])
images.append(render(
[np.array(s) for s in datum['strokes']],
size))
vocab = list(sorted(set(chars)))
char_to_index = {ch: y for y, ch in enumerate(vocab)}
with h5py.File(output, 'a') as f:
str_dt = h5py.special_dtype(vlen=str)
f.require_dataset(
'vocab', (len(vocab),), dtype=str_dt
)[...] = vocab
f.require_dataset(
'x', shape=(len(images), size, size), dtype=np.float32
)[...] = np.array(images)
f.require_dataset(
'y', shape=(len(chars),), dtype=np.int
)[...] = np.array([char_to_index[ch] for ch in chars])
@click.group()
def cli():
'''Base command for dataset processing.
'''
pass
cli.add_command(cli_read)
cli.add_command(cli_render)
if __name__ == '__main__':
cli()
| mit | -3,253,602,979,370,613,000 | 30.940701 | 79 | 0.556624 | false |
Joev-/HoNCore | honcore/lib/construct/adapters.py | 1 | 15909 | from core import Adapter, AdaptationError, Pass
from lib import int_to_bin, bin_to_int, swap_bytes, StringIO
from lib import FlagsContainer, HexString
#===============================================================================
# exceptions
#===============================================================================
class BitIntegerError(AdaptationError):
__slots__ = []
class MappingError(AdaptationError):
__slots__ = []
class ConstError(AdaptationError):
__slots__ = []
class ValidationError(AdaptationError):
__slots__ = []
class PaddingError(AdaptationError):
__slots__ = []
#===============================================================================
# adapters
#===============================================================================
class BitIntegerAdapter(Adapter):
"""
Adapter for bit-integers (converts bitstrings to integers, and vice versa).
See BitField.
Parameters:
* subcon - the subcon to adapt
* width - the size of the subcon, in bits
* swapped - whether to swap byte order (little endian/big endian).
default is False (big endian)
* signed - whether the value is signed (two's complement). the default
is False (unsigned)
* bytesize - number of bits per byte, used for byte-swapping (if swapped).
default is 8.
"""
__slots__ = ["width", "swapped", "signed", "bytesize"]
def __init__(self, subcon, width, swapped = False, signed = False,
bytesize = 8):
Adapter.__init__(self, subcon)
self.width = width
self.swapped = swapped
self.signed = signed
self.bytesize = bytesize
def _encode(self, obj, context):
if obj < 0 and not self.signed:
raise BitIntegerError("object is negative, but field is not signed",
obj)
obj2 = int_to_bin(obj, width = self.width)
if self.swapped:
obj2 = swap_bytes(obj2, bytesize = self.bytesize)
return obj2
def _decode(self, obj, context):
if self.swapped:
obj = swap_bytes(obj, bytesize = self.bytesize)
return bin_to_int(obj, signed = self.signed)
class MappingAdapter(Adapter):
"""
Adapter that maps objects to other objects.
See SymmetricMapping and Enum.
Parameters:
* subcon - the subcon to map
* decoding - the decoding (parsing) mapping (a dict)
* encoding - the encoding (building) mapping (a dict)
* decdefault - the default return value when the object is not found
in the decoding mapping. if no object is given, an exception is raised.
if `Pass` is used, the unmapped object will be passed as-is
* encdefault - the default return value when the object is not found
in the encoding mapping. if no object is given, an exception is raised.
if `Pass` is used, the unmapped object will be passed as-is
"""
__slots__ = ["encoding", "decoding", "encdefault", "decdefault"]
def __init__(self, subcon, decoding, encoding,
decdefault = NotImplemented, encdefault = NotImplemented):
Adapter.__init__(self, subcon)
self.decoding = decoding
self.encoding = encoding
self.decdefault = decdefault
self.encdefault = encdefault
def _encode(self, obj, context):
try:
return self.encoding[obj]
except (KeyError, TypeError):
if self.encdefault is NotImplemented:
raise MappingError("no encoding mapping for %r" % (obj,))
if self.encdefault is Pass:
return obj
return self.encdefault
def _decode(self, obj, context):
try:
return self.decoding[obj]
except (KeyError, TypeError):
if self.decdefault is NotImplemented:
raise MappingError("no decoding mapping for %r" % (obj,))
if self.decdefault is Pass:
return obj
return self.decdefault
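# Editor's sketch (not part of the original module): MappingAdapter is normally
# used through the Enum/SymmetricMapping macros, but it can also be applied
# directly. UBInt8 below is assumed to come from the accompanying construct
# macros.
#
#   status = MappingAdapter(UBInt8("status"),
#                           decoding={0: "ok", 1: "error"},
#                           encoding={"ok": 0, "error": 1},
#                           decdefault=Pass, encdefault=Pass)
#   status.parse("\x01")   # -> "error"
#   status.build("ok")     # -> "\x00"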
class FlagsAdapter(Adapter):
"""
Adapter for flag fields. Each flag is extracted from the number, resulting
in a FlagsContainer object. Not intended for direct usage.
See FlagsEnum.
Parameters
* subcon - the subcon to extract
* flags - a dictionary mapping flag-names to their value
"""
__slots__ = ["flags"]
def __init__(self, subcon, flags):
Adapter.__init__(self, subcon)
self.flags = flags
def _encode(self, obj, context):
flags = 0
for name, value in self.flags.iteritems():
if getattr(obj, name, False):
flags |= value
return flags
def _decode(self, obj, context):
obj2 = FlagsContainer()
for name, value in self.flags.iteritems():
setattr(obj2, name, bool(obj & value))
return obj2
class StringAdapter(Adapter):
"""
Adapter for strings. Converts a sequence of characters into a python
string, and optionally handles character encoding.
See String.
Parameters:
* subcon - the subcon to convert
* encoding - the character encoding name (e.g., "utf8"), or None to
return raw bytes (usually 8-bit ASCII).
"""
__slots__ = ["encoding"]
def __init__(self, subcon, encoding = None):
Adapter.__init__(self, subcon)
self.encoding = encoding
def _encode(self, obj, context):
if self.encoding:
obj = obj.encode(self.encoding)
return obj
def _decode(self, obj, context):
obj = "".join(obj)
if self.encoding:
obj = obj.decode(self.encoding)
return obj
class PaddedStringAdapter(Adapter):
r"""
Adapter for padded strings.
See String.
Parameters:
* subcon - the subcon to adapt
* padchar - the padding character. default is "\x00".
* paddir - the direction where padding is placed ("right", "left", or
"center"). the default is "right".
* trimdir - the direction where trimming will take place ("right" or
"left"). the default is "right". trimming is only meaningful for
building, when the given string is too long.
"""
__slots__ = ["padchar", "paddir", "trimdir"]
def __init__(self, subcon, padchar = "\x00", paddir = "right",
trimdir = "right"):
if paddir not in ("right", "left", "center"):
raise ValueError("paddir must be 'right', 'left' or 'center'",
paddir)
if trimdir not in ("right", "left"):
raise ValueError("trimdir must be 'right' or 'left'", trimdir)
Adapter.__init__(self, subcon)
self.padchar = padchar
self.paddir = paddir
self.trimdir = trimdir
def _decode(self, obj, context):
if self.paddir == "right":
obj = obj.rstrip(self.padchar)
elif self.paddir == "left":
obj = obj.lstrip(self.padchar)
else:
obj = obj.strip(self.padchar)
return obj
def _encode(self, obj, context):
size = self._sizeof(context)
if self.paddir == "right":
obj = obj.ljust(size, self.padchar)
elif self.paddir == "left":
obj = obj.rjust(size, self.padchar)
else:
obj = obj.center(size, self.padchar)
if len(obj) > size:
if self.trimdir == "right":
obj = obj[:size]
else:
obj = obj[-size:]
return obj
class LengthValueAdapter(Adapter):
"""
Adapter for length-value pairs. It extracts only the value from the
pair, and calculates the length based on the value.
See PrefixedArray and PascalString.
Parameters:
* subcon - the subcon returning a length-value pair
"""
__slots__ = []
def _encode(self, obj, context):
return (len(obj), obj)
def _decode(self, obj, context):
return obj[1]
class CStringAdapter(StringAdapter):
r"""
Adapter for C-style strings (strings terminated by a terminator char).
Parameters:
* subcon - the subcon to convert
* terminators - a sequence of terminator chars. default is "\x00".
* encoding - the character encoding to use (e.g., "utf8"), or None to
return raw-bytes. the terminator characters are not affected by the
encoding.
"""
__slots__ = ["terminators"]
def __init__(self, subcon, terminators = "\x00", encoding = None):
StringAdapter.__init__(self, subcon, encoding = encoding)
self.terminators = terminators
def _encode(self, obj, context):
return StringAdapter._encode(self, obj, context) + self.terminators[0]
def _decode(self, obj, context):
return StringAdapter._decode(self, obj[:-1], context)
class TunnelAdapter(Adapter):
"""
Adapter for tunneling (as in protocol tunneling). A tunnel is construct
nested upon another (layering). For parsing, the lower layer first parses
the data (note: it must return a string!), then the upper layer is called
to parse that data (bottom-up). For building it works in a top-down manner;
first the upper layer builds the data, then the lower layer takes it and
writes it to the stream.
Parameters:
* subcon - the lower layer subcon
* inner_subcon - the upper layer (tunneled/nested) subcon
Example:
# a pascal string containing compressed data (zlib encoding), so first
# the string is read, decompressed, and finally re-parsed as an array
# of UBInt16
TunnelAdapter(
PascalString("data", encoding = "zlib"),
GreedyRange(UBInt16("elements"))
)
"""
__slots__ = ["inner_subcon"]
def __init__(self, subcon, inner_subcon):
Adapter.__init__(self, subcon)
self.inner_subcon = inner_subcon
def _decode(self, obj, context):
return self.inner_subcon._parse(StringIO(obj), context)
def _encode(self, obj, context):
stream = StringIO()
self.inner_subcon._build(obj, stream, context)
return stream.getvalue()
class ExprAdapter(Adapter):
"""
A generic adapter that accepts 'encoder' and 'decoder' as parameters. You
can use ExprAdapter instead of writing a full-blown class when only a
simple expression is needed.
Parameters:
* subcon - the subcon to adapt
* encoder - a function that takes (obj, context) and returns an encoded
version of obj
    * decoder - a function that takes (obj, context) and returns a decoded
      version of obj
Example:
ExprAdapter(UBInt8("foo"),
encoder = lambda obj, ctx: obj / 4,
decoder = lambda obj, ctx: obj * 4,
)
"""
__slots__ = ["_encode", "_decode"]
def __init__(self, subcon, encoder, decoder):
Adapter.__init__(self, subcon)
self._encode = encoder
self._decode = decoder
class HexDumpAdapter(Adapter):
"""
    Adapter for hex-dumping strings. It returns a HexString, which is a string
    with a hex-dump representation.
    """
__slots__ = ["linesize"]
def __init__(self, subcon, linesize = 16):
Adapter.__init__(self, subcon)
self.linesize = linesize
def _encode(self, obj, context):
return obj
def _decode(self, obj, context):
return HexString(obj, linesize = self.linesize)
class ConstAdapter(Adapter):
"""
Adapter for enforcing a constant value ("magic numbers"). When decoding,
the return value is checked; when building, the value is substituted in.
Parameters:
* subcon - the subcon to validate
* value - the expected value
Example:
Const(Field("signature", 2), "MZ")
"""
__slots__ = ["value"]
def __init__(self, subcon, value):
Adapter.__init__(self, subcon)
self.value = value
def _encode(self, obj, context):
if obj is None or obj == self.value:
return self.value
else:
raise ConstError("expected %r, found %r" % (self.value, obj))
def _decode(self, obj, context):
if obj != self.value:
raise ConstError("expected %r, found %r" % (self.value, obj))
return obj
class SlicingAdapter(Adapter):
"""
Adapter for slicing a list (getting a slice from that list)
Parameters:
* subcon - the subcon to slice
* start - start index
* stop - stop index (or None for up-to-end)
* step - step (or None for every element)
"""
__slots__ = ["start", "stop", "step"]
def __init__(self, subcon, start, stop = None):
Adapter.__init__(self, subcon)
self.start = start
self.stop = stop
def _encode(self, obj, context):
if self.start is None:
return obj
return [None] * self.start + obj
def _decode(self, obj, context):
return obj[self.start:self.stop]
class IndexingAdapter(Adapter):
"""
Adapter for indexing a list (getting a single item from that list)
Parameters:
* subcon - the subcon to index
* index - the index of the list to get
"""
__slots__ = ["index"]
def __init__(self, subcon, index):
Adapter.__init__(self, subcon)
if type(index) is not int:
raise TypeError("index must be an integer", type(index))
self.index = index
def _encode(self, obj, context):
return [None] * self.index + [obj]
def _decode(self, obj, context):
return obj[self.index]
class PaddingAdapter(Adapter):
r"""
Adapter for padding.
Parameters:
* subcon - the subcon to pad
* pattern - the padding pattern (character). default is "\x00"
* strict - whether or not to verify, during parsing, that the given
padding matches the padding pattern. default is False (unstrict)
"""
__slots__ = ["pattern", "strict"]
def __init__(self, subcon, pattern = "\x00", strict = False):
Adapter.__init__(self, subcon)
self.pattern = pattern
self.strict = strict
def _encode(self, obj, context):
return self._sizeof(context) * self.pattern
def _decode(self, obj, context):
if self.strict:
expected = self._sizeof(context) * self.pattern
if obj != expected:
raise PaddingError("expected %r, found %r" % (expected, obj))
return obj
#===============================================================================
# validators
#===============================================================================
class Validator(Adapter):
"""
Abstract class: validates a condition on the encoded/decoded object.
Override _validate(obj, context) in deriving classes.
Parameters:
* subcon - the subcon to validate
"""
__slots__ = []
def _decode(self, obj, context):
if not self._validate(obj, context):
raise ValidationError("invalid object", obj)
return obj
def _encode(self, obj, context):
return self._decode(obj, context)
def _validate(self, obj, context):
raise NotImplementedError()
class OneOf(Validator):
"""
Validates that the value is one of the listed values
Parameters:
* subcon - the subcon to validate
* valids - a set of valid values
"""
__slots__ = ["valids"]
def __init__(self, subcon, valids):
Validator.__init__(self, subcon)
self.valids = valids
def _validate(self, obj, context):
return obj in self.valids
class NoneOf(Validator):
"""
Validates that the value is none of the listed values
Parameters:
* subcon - the subcon to validate
* invalids - a set of invalid values
"""
__slots__ = ["invalids"]
def __init__(self, subcon, invalids):
Validator.__init__(self, subcon)
self.invalids = invalids
def _validate(self, obj, context):
return obj not in self.invalids
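# Editor's sketch (not part of the original module): validators wrap another
# construct and reject out-of-range values. UBInt8 is assumed to come from the
# accompanying construct macros.
#
#   OneOf(UBInt8("foo"), [4, 5, 6, 7]).parse("\x05")   # -> 5
#   OneOf(UBInt8("foo"), [4, 5, 6, 7]).parse("\x08")   # -> raises ValidationError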
| unlicense | -5,460,892,987,634,537,000 | 32.006224 | 80 | 0.587466 | false |
gamda/checkers | checkers/model.py | 1 | 14118 | # Copyright (c) 2015 Gamda Software, LLC
#
# See the file LICENSE.txt for copying permission.
from enum import Enum
from gameboard.gameboard import Gameboard
from gameboard.gameboard import Direction
from gameboard.coordinate import Coordinate
class Chip:
class Color(Enum):
white = True
black = False
class Type(Enum):
soldier = 0
queen = 1
def __init__(self, color):
if not isinstance(color, self.Color):
raise ValueError("Use Chip.Color values")
self.color = color
self.type = self.Type.soldier
def promote(self):
self.type = self.Type.queen
class Model:
class Gamestate(Enum):
invalidMove = -1
inProgress = 0
whiteWon = 1
blackWon = 2
tie = 3
def new_game(self):
self.__init__()
def __init__(self):
self.board = Gameboard()
self.chips = {Coordinate.a1: Chip(Chip.Color.white),
Coordinate.c1: Chip(Chip.Color.white),
Coordinate.e1: Chip(Chip.Color.white),
Coordinate.g1: Chip(Chip.Color.white),
Coordinate.b2: Chip(Chip.Color.white),
Coordinate.d2: Chip(Chip.Color.white),
Coordinate.f2: Chip(Chip.Color.white),
Coordinate.h2: Chip(Chip.Color.white),
Coordinate.a3: Chip(Chip.Color.white),
Coordinate.c3: Chip(Chip.Color.white),
Coordinate.e3: Chip(Chip.Color.white),
Coordinate.g3: Chip(Chip.Color.white),
Coordinate.b6: Chip(Chip.Color.black),
Coordinate.d6: Chip(Chip.Color.black),
Coordinate.f6: Chip(Chip.Color.black),
Coordinate.h6: Chip(Chip.Color.black),
Coordinate.a7: Chip(Chip.Color.black),
Coordinate.c7: Chip(Chip.Color.black),
Coordinate.e7: Chip(Chip.Color.black),
Coordinate.g7: Chip(Chip.Color.black),
Coordinate.b8: Chip(Chip.Color.black),
Coordinate.d8: Chip(Chip.Color.black),
Coordinate.f8: Chip(Chip.Color.black),
Coordinate.h8: Chip(Chip.Color.black)}
for k in self.chips.keys():
self.board.set_content(k,self.chips[k])
self.turn = Chip.Color.white
self._current_chip = None
def _neighbor_in_direction(self, square, direction):
neighborSquare = self.board.neighbor_in_direction(square, direction)
return neighborSquare
def _next_neighbor_in_direction(self, square, direction):
neighbor_square = self.board.neighbor_in_direction(square, direction)
if neighbor_square is not None: # check the next
new_neighbor = \
self.board.neighbor_in_direction(neighbor_square, direction)
if new_neighbor is not None:
return new_neighbor
return None
def _enemy_in_neighbor(self, square, direction):
neighbor = self._neighbor_in_direction(square, direction)
return neighbor is not None and \
self.board.get_content(neighbor) is not None and \
self.board.get_content(neighbor).color != self.turn
def _directions_for_soldier(self):
white_directions = [Direction.top_left, Direction.top_right]
black_directions = [Direction.btm_left, Direction.btm_right]
return white_directions \
if self.turn == Chip.Color.white \
else black_directions
def _soldier_available_jumps(self, square):
jumps = set()
for direction in self._directions_for_soldier():
if self._enemy_in_neighbor(square, direction):
next_neighbor = \
self._next_neighbor_in_direction(square, direction)
if next_neighbor is not None and \
self.board.get_content(next_neighbor) is None:
jumps.add((square, next_neighbor))
return jumps
def _soldier_available_regular_moves(self, square):
moves = set()
for direction in self._directions_for_soldier():
neighbor = self._neighbor_in_direction(square, direction)
if neighbor is not None and \
self.board.get_content(neighbor) is None:
# empty square, valid move
moves.add((square, neighbor))
return moves
def _soldier_can_jump(self, square):
return bool(self._soldier_available_jumps(square))
def _soldier_chip_available_moves(self, square):
moves = self._soldier_available_jumps(square)
if len(moves) > 0:
return moves, True
return self._soldier_available_regular_moves(square), False
def _queen_rival_found_moves(self,
origin,
square,
direction,
moves,
can_jump):
my_moves = moves
neighbor = self._neighbor_in_direction(square, direction)
if neighbor is not None:
content = self.board.get_content(neighbor)
if content is None and can_jump:
# another empty square after a jump
my_moves.add((origin, neighbor))
return my_moves, True
elif content is None and not can_jump:
# just found out queen can jump
my_moves = set([(origin, neighbor)])
return my_moves, True
return moves, can_jump # two chips in a row or out of bounds
def _queen_moves_in_direction(self, square, direction):
moves, can_jump = set(), False
neighbor = self._neighbor_in_direction(square, direction)
while neighbor is not None:
content = self.board.get_content(neighbor)
if content is None: # empty
moves.add((square, neighbor))
            elif content.color != self.turn:  # rival
# rival chip found
old_moves = moves
moves, can_jump = self._queen_rival_found_moves(square,
neighbor,
direction,
moves,
can_jump)
neighbor = self._neighbor_in_direction(neighbor, direction)
if moves == old_moves:
break # two chips in a row or out of bounds
else:
break # ally chip found
neighbor = self._neighbor_in_direction(neighbor, direction)
return moves, can_jump
def _queen_can_jump(self, square):
moves, can_jump = self._queen_chip_available_moves(square)
return can_jump
def _queen_chip_available_moves(self, square):
directions = [Direction.top_left, Direction.top_right,
Direction.btm_left, Direction.btm_right]
moves, can_jump = set(), False
for d in directions:
new_moves, new_can_jump = self._queen_moves_in_direction(square, d)
if can_jump == new_can_jump:
moves = moves | new_moves
elif not can_jump and new_can_jump:
moves = new_moves
can_jump = True
return moves, can_jump
def _chip_can_jump(self, square):
if square in self.chips:
if self.chips[square].type == Chip.Type.soldier:
return self._soldier_can_jump(square)
else:
return self._queen_can_jump(square)
return False
def chip_available_moves(self, square):
"""Return a tuple (set[available_moves], bool can_jump)
Args:
square (Coordinate): the square where the chip is/should be
Returns:
set: tuple of Coordinate values of valid moves for the chip. They
have the form (Coordinate.origin, Coordinate.destination)
bool: True if the chip can jump, False otherwise
"""
if not isinstance(square, Coordinate):
raise TypeError("square variable must be from Coordinate enum")
if square not in self.chips.keys() or \
self.board.get_content(square) is None:
# chip is not in the game anymore
return set(), False
chip = self.chips[square]
if chip.color != self.turn:
return set(), False
if chip.type == Chip.Type.soldier:
return self._soldier_chip_available_moves(square)
return self._queen_chip_available_moves(square)
def available_moves(self):
"""Return a set with tuples of Coordinate values of all available moves
Returns:
set: tuple of Coordinate values of valid moves for the chip. They
have the form (Coordinate.origin, Coordinate.destination)
"""
moves = set()
if self._current_chip is not None:
moves, can_jump = self.chip_available_moves(self._current_chip)
return moves
can_jump = False
for coord, chip in self.chips.items():
newMoves, newcan_jump = self.chip_available_moves(coord)
if can_jump == newcan_jump:
moves = moves | newMoves
elif not can_jump and newcan_jump: # found a jump, delete old moves
moves = newMoves
can_jump = True
# else found regular move, but jump found previously
return moves
def _promote(self, square):
startIndex = 0 if self.turn == Chip.Color.white else 7
promo_squares = []
for i in range(startIndex, 64, 8):
promo_squares.append(Coordinate(i))
if square in promo_squares:
self.chips[square].promote()
def _next_turn(self):
self.turn = Chip.Color.black \
if self.turn == Chip.Color.white \
else Chip.Color.white
def _gamestate(self):
if len(self.available_moves()) == 0:
return self.Gamestate.whiteWon \
if self.turn == Chip.Color.black \
else self.Gamestate.blackWon
return self.Gamestate.inProgress
def _remove_chips(self, origin, destination):
removed = []
direction = self._direction_of_move(origin, destination)
squares_jumped = self.board.path_in_direction(origin,
destination,
direction)
for s in squares_jumped:
if self.board.get_content(s) != None:
self.board.clear_square(s)
del self.chips[s]
removed.append(s)
return removed
def _direction_of_move(self, origin, destination):
distance = destination - origin
direction = None
if distance < 0: # moved left
if distance % 7 == 0: # moved top
direction = Direction.top_left
else: # distance % 9 == 0, moved btm
direction = Direction.btm_left
else: # moved right
if distance % 9 == 0:
direction = Direction.top_right
else:
direction = Direction.btm_right
return direction
def move(self, origin, destination):
"""Perform the requested move and returns a tuple (Gamestate, list)
Args:
origin (Coordinate): the square where the chip is currently
destination (Direction): the square where the chip will end
Returns:
Gamestate: value from enum
list: Coordinate values indicating the chip(s) removed
Raises:
TypeError: if origin or destination is not Coordinate
"""
if not isinstance(origin, Coordinate):
raise TypeError("origin variable must be from Coordinate enum")
if not isinstance(destination, Coordinate):
raise TypeError("destination must be from Coordinate enum")
if not (origin, destination) in self.available_moves():
return self.Gamestate.invalidMove, []
turnFinished = True
_, jumped = self.chip_available_moves(origin)
# move chip
self.board.move(origin, destination)
self.chips[destination] = self.chips[origin]
del self.chips[origin]
self._promote(destination)
        # remove chips if a jump occurred
distance = destination - origin
removed = []
if jumped:
removed = self._remove_chips(origin, destination)
if self._chip_can_jump(destination):
turnFinished = False
self._current_chip = destination
if turnFinished:
self._next_turn()
self._current_chip = None
self._promote(destination)
return (self._gamestate(), removed)
def square_contains_teammate(self, square):
"""Returns True if the chip belongs to the team whose turn it is
Args:
square (Coordinate): the square to check for an ally chip
Returns:
bool: True if the chip belongs to the team whose turn it is
Raises:
TypeError: if square is not Coordinate
"""
if not isinstance(square, Coordinate):
raise TypeError("square variable must be from Coordinate enum")
# Python's lazy evaluation makes sure this expression will never
# throw KeyError because if the key is not in the dictionary, the
# second expression will not be evaluated
return square in self.chips.keys() and \
self.chips[square].color == self.turn
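if __name__ == "__main__":
    # Editor's usage sketch (not part of the original module): start a game and
    # list the opening moves available to white. The concrete coordinates depend
    # on the gameboard package's Coordinate layout.
    game = Model()
    for origin, destination in game.available_moves():
        print("%s -> %s" % (origin.name, destination.name))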
| mit | -4,265,342,469,476,370,000 | 39.107955 | 79 | 0.555461 | false |
chiffa/PolyPharma | bioflow/molecular_network/interactome_analysis.py | 1 | 22116 | """
New analytical routines for the interactome
"""
import pickle
from collections import namedtuple
from csv import reader
from csv import writer as csv_writer
from multiprocessing import Pool
from collections import defaultdict
import traceback
from pprint import pprint
import os
import psutil
from typing import Any, Union, TypeVar, NewType, Tuple, List
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import gumbel_r
from tabulate import tabulate
from bioflow.configs.main_configs import Dumps, estimated_comp_ops, NewOutputs, \
sparse_analysis_threshold, implicitely_threaded, p_val_cutoff, min_nodes_for_p_val
from bioflow.sample_storage.mongodb import find_interactome_rand_samp, count_interactome_rand_samp
from bioflow.configs.main_configs import output_location
from bioflow.molecular_network.InteractomeInterface import InteractomeInterface
from bioflow.utils.dataviz import kde_compute
from bioflow.utils.log_behavior import get_logger
from bioflow.utils.io_routines import get_source_bulbs_ids, get_background_bulbs_ids
from bioflow.utils.general_utils.high_level_os_io import mkdir_recursive
from bioflow.algorithms_bank.flow_significance_evaluation import get_neighboring_degrees, get_p_val_by_gumbel
log = get_logger(__name__)
def get_interactome_interface(background_up_ids=()) -> InteractomeInterface:
"""
Retrieves an "InteractomeInterface" object
:return:
"""
interactome_interface_instance = InteractomeInterface(background_up_ids=background_up_ids)
interactome_interface_instance.fast_load()
log.debug("get_interactome state e_p_u_b_i length: %s",
len(interactome_interface_instance.active_up_sample))
log.info("interactome interface loaded in %s" % interactome_interface_instance.pretty_time())
# is the case now
return interactome_interface_instance
def spawn_sampler(args_puck):
"""
Spawns a sampler initialized from the default GO_Interface.
:param args_puck: combined list of sample sizes, iterations, background sets, and sparse
sampling argument
"""
# log.info('Pool process %d started' % args_puck[-1])
background_set_arg = args_puck[3]
interactome_interface_instance = get_interactome_interface(background_set_arg)
sample_size_list = args_puck[0]
iteration_list = args_puck[1]
sparse_rounds = args_puck[2]
pool_no = args_puck[-1] # TODO: switch over to PID here
interactome_interface_instance.reset_thread_hex()
interactome_interface_instance.randomly_sample(
sample_size_list,
iteration_list,
sparse_rounds,
pool_no=pool_no
)
def spawn_sampler_pool(
pool_size,
sample_size_list,
interaction_list_per_pool,
background_set,
sparse_rounds=False):
"""
Spawns a pool of samplers of the information flow within the GO system
:param pool_size: number of processes that are performing the sample pooling and analyzing
:param sample_size_list: size of the sample list
:param interaction_list_per_pool: number of iterations performing the pooling of the samples
in each list
:param sparse_rounds: number of sparse rounds to run (or False if sampling is dense)
:param background_set: set of node ids that are to be sampled from
"""
payload = [
(sample_size_list,
interaction_list_per_pool,
sparse_rounds,
background_set)]
payload_list = payload * pool_size
payload_list = [list(item)+[i] for i, item in enumerate(payload_list)] # prepare the payload
global implicitely_threaded
if not implicitely_threaded:
with Pool(processes=pool_size) as pool: # This is the object we are using to spawn a thread pool
try:
log.debug('spawning the sampler with payload %s', payload)
                pool.map(spawn_sampler, payload_list)  # This is what we spawn as a sampler
# KNOWNBUG: hangs with no message upon a second start attempt in Interactome
# analysis due to cholmod
except Exception as e:
msg = "{}\n\nOriginal {}".format(e, traceback.format_exc())
raise type(e)(msg)
# log.info('Last in-pool flag exiting')
pool.terminate()
# log.info('Pool terminated')
else:
log.debug('spawning single-thread sampler with payload %s', payload)
for _payload in payload_list:
spawn_sampler(_payload)
def local_indexed_select(bi_array, array_column, selection_span):
"""
    Convenience function that selects from bi_array all the columns whose value
    in the row indexed by array_column falls within the selection span
    :param bi_array: the matrix on which we will be performing the selection
    :param array_column: index of the row to which the selection span is applied
    :param selection_span: (low, high) bounds; columns outside of it are dropped.
"""
selector = np.logical_and(
selection_span[0] < bi_array[array_column, :],
bi_array[array_column, :] < selection_span[1])
if not any(selector):
return np.array([[0.0, 0.0, 0.0]])
filtered_bi_array = bi_array[:, selector]
return filtered_bi_array
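# Editor's sketch (not part of the original module): keep only the columns of a
# two-row array whose second-row value falls inside a span.
#
#   >>> arr = np.array([[0.1, 0.5, 0.9],
#   ...                 [1.0, 2.0, 3.0]])
#   >>> local_indexed_select(arr, 1, (1.5, 3.5))
#   # -> columns [[0.5, 0.9], [2.0, 3.0]]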
def samples_scatter_and_hist(background_curr_deg_conf, true_sample_bi_corr_array,
save_path: NewOutputs = None, p_values: np.array = None):
"""
    A general function that plots a demonstration of random samples of the same
    size as our sample together with our sample, and conducts the statistical tests
    on whether any of the nodes or functional groups in our sample are non-random
:param background_curr_deg_conf: [[current, informativity, confusion_potential], ...] -
characteristics of the random samples
:param true_sample_bi_corr_array: [[current, informativity, confusion_potential], ...] -
characteristics of the true sample. If none, nothing happens
:param save_path: where the thing will be saved
:param p_values: p-value map that will be used to save things after the analysis
:return: None
"""
fig = plt.figure()
fig.set_size_inches(30, 20)
# bivect: [0, :] - current; [1, :] - informativity
plt.subplot(211)
plt.title('current through nodes')
bins = np.linspace(
background_curr_deg_conf[0, :].min(),
background_curr_deg_conf[0, :].max(), 100)
if true_sample_bi_corr_array is not None:
bins = np.linspace(min(background_curr_deg_conf[0, :].min(),
true_sample_bi_corr_array[0, :].min()),
max(background_curr_deg_conf[0, :].max(),
true_sample_bi_corr_array[0, :].max()),
100)
plt.hist(background_curr_deg_conf[0, :],
bins=bins, histtype='step', log=True, color='b')
if true_sample_bi_corr_array is not None:
plt.hist(true_sample_bi_corr_array[0, :],
bins=bins, histtype='step', log=True, color='r')
plt.subplot(212)
plt.scatter(background_curr_deg_conf[1, :],
background_curr_deg_conf[0, :], color='b', alpha=0.1)
if true_sample_bi_corr_array is not None:
if p_values is not None:
_filter = p_values < p_val_cutoff
anti_filter = np.logical_not(_filter)
plt.scatter(true_sample_bi_corr_array[1, anti_filter],
true_sample_bi_corr_array[0, anti_filter],
color='gray', alpha=0.25)
plt.scatter(true_sample_bi_corr_array[1, _filter],
true_sample_bi_corr_array[0, _filter],
color='r', alpha=0.7)
else:
plt.scatter(true_sample_bi_corr_array[1, :],
true_sample_bi_corr_array[0, :],
color='r', alpha=0.5)
# plt.show()
plt.savefig(save_path.interactome_network_scatterplot)
plt.clf()
def compare_to_blank(blank_model_size: int,
interactome_interface_instance: InteractomeInterface,
p_val: float = 0.05,
sparse_rounds: bool = False,
output_destination: NewOutputs = None) -> Tuple[list, dict]:
"""
Recovers the statistics on the circulation nodes and shows the visual of a circulation system.
There is no issue with using the same interactome interface instance, because they are forked when
threads are generated and will not interfere.
:param blank_model_size: the number of uniprots in the blank model
:param p_val: desired p_value for the returned terms
:param sparse_rounds: if set to a number, sparse computation technique would be used
with the number of rounds equal the integer value of that argument
:param interactome_interface_instance:
:return: None if no significant nodes, the node and group characteristic
dictionaries otherwise
"""
def get_max_for_each_degree(sample_sub_arrray):
# print('debug max_array_shape:', str(sample_sub_arrray.shape))
degrees = np.unique(sample_sub_arrray[1, :])
max_array = []
for degree in degrees:
filter = sample_sub_arrray[1, :] == degree
max_array.append([sample_sub_arrray[0, filter].max(), degree])
m_arr = np.array(max_array)
return m_arr.T
if interactome_interface_instance is None or interactome_interface_instance.node_current == {}:
raise Exception("tried to compare to blanc an empty interface instance")
md5_hash = interactome_interface_instance.md5_hash()
background_sub_array_list = []
max_sub_array_list = []
count = 0
log.info("looking to test against:"
"\t size: %s \t sys_hash: %s \t sparse_rounds: %s" %
(blank_model_size, md5_hash, sparse_rounds))
log.info("samples found to test against:\t %s" %
count_interactome_rand_samp({'size': blank_model_size,
'sys_hash': md5_hash,
'sparse_rounds': sparse_rounds}))
background_sample = find_interactome_rand_samp({'size': blank_model_size,
'sys_hash': md5_hash,
'sparse_rounds': sparse_rounds})
for i, sample in enumerate(background_sample):
_, node_currents = pickle.loads(sample['currents'])
dict_system = interactome_interface_instance.format_node_props(node_currents, limit=0)
background_sub_array = list(dict_system.values())
if np.array(background_sub_array).T.shape[0] < 2:
log.info(background_sub_array)
continue
background_sub_array_list.append(np.array(background_sub_array).T)
# print(np.array(background_sub_array).T.shape)
# pprint(background_sub_array)
max_arr = get_max_for_each_degree(np.array(background_sub_array).T)
max_sub_array_list.append(max_arr)
count = i
# This part declares the pre-operators required for the verification of a
# real sample
background_array = np.concatenate(tuple(background_sub_array_list), axis=1)
max_array = np.concatenate(tuple(max_sub_array_list), axis=1)
node_currents = interactome_interface_instance.node_current
dict_system = interactome_interface_instance.format_node_props(node_currents)
curr_inf_conf_tot = np.array([[int(key)] + list(val) for key, val in list(dict_system.items())]).T
node_ids, query_array = (curr_inf_conf_tot[0, :], curr_inf_conf_tot[(1, 2), :])
log.info("stats on %s samples" % count)
background_density = kde_compute(background_array[(1, 0), :], 50, count)
base_bi_corr = background_array[(0, 1), :]
r_rels = []
r_std_nodes = []
degrees = np.unique(query_array[1, :])
combined_p_vals = np.ones_like(query_array[1, :])
for degree in degrees.tolist():
_filter = query_array[1, :] == degree
entry = query_array[:, _filter]
background_set = background_array[:, background_array[1, :] == degree]
max_current_per_run = get_neighboring_degrees(degree,
max_array,
min_nodes=min_nodes_for_p_val)
p_vals = get_p_val_by_gumbel(entry, max_current_per_run)
combined_p_vals[_filter] = p_vals
samples_scatter_and_hist(background_array, query_array,
save_path=output_destination,
p_values=combined_p_vals)
r_nodes = background_density(query_array[(1, 0), :]) # legacy - unused now
r_nodes = combined_p_vals
for point in query_array.T:
selector = np.logical_and(base_bi_corr[1, :] > point[1]*0.9, base_bi_corr[1, :] < point[1]*1.1)
r_rels.append(point[0] / np.mean(base_bi_corr[0, selector]))
r_std_nodes.append((point[0] - np.mean(base_bi_corr[0, selector])) / np.std(base_bi_corr[0,
selector]))
r_rels = np.array(r_rels)
r_std_nodes = np.array(r_std_nodes)
not_random_nodes = [node_id for node_id in node_ids[r_nodes < p_val].tolist()]
# basically the second element below are the nodes that contribute to the
# information flow through the node that is considered as non-random
log.debug('debug, not random nodes: %s', not_random_nodes)
log.debug('debug bulbs_id_disp_name: %s',
list(interactome_interface_instance.neo4j_id_2_display_name.items())[:10])
node_char_list = [
[int(nr_node_id), interactome_interface_instance.neo4j_id_2_display_name[nr_node_id]] +
dict_system[nr_node_id] + r_nodes[node_ids == float(nr_node_id)].tolist()
for nr_node_id in not_random_nodes]
nodes_dict = np.hstack((node_ids[:, np.newaxis],
r_nodes[:, np.newaxis],
r_rels[:, np.newaxis],
r_std_nodes[:, np.newaxis]))
nodes_dict = dict((node[0], (node[1], node[2], node[3])) for node in nodes_dict.tolist())
nodes_dict = defaultdict(lambda: (1., 0., 0.), nodes_dict) # corresponds to the cases of super low flow - never significant
# TODO: pull the groups corresponding to non-random associations.
# => Will not implement, it's already done by Gephi
return sorted(node_char_list, key=lambda x: x[4]), nodes_dict
# TODO: [weighted inputs] add support for a dict as source_list, not only list
def auto_analyze(source_list: List[List[int]],
output_destinations_list: Union[List[str], None] = None,
desired_depth: int = 24,
processors: int = 0,
background_list: Union[List[int], None] = None,
skip_sampling: bool = False,
p_value_cutoff: float = -1,
) -> None:
"""
    Automatically analyzes the interactome synergetic action of the RNA_seq results
:param source_list: python list of hits for each condition
:param output_destinations_list: list of names for each condition
:param desired_depth: total samples we would like to compare each set of hits with
:param processors: number of processes that will be loaded. as a rule of thumb,
for max performance, use N-1 processors, where N is the number of physical cores on the
machine, which is the default
    :param background_list: list of physical entities that an experimental method can retrieve
:param skip_sampling: if true, will skip background sampling step
"""
    # Multiple re-spawns of threaded processing are incompatible with scikits.sparse.cholmod
if len(source_list) > 1:
global implicitely_threaded
implicitely_threaded = True
    if output_destinations_list is not None \
            and len(output_destinations_list) != len(source_list):
        log.warning('Output destination list has %d elements, whereas %d sources were supplied. '
                    'Falling back to default output structure',
                    len(output_destinations_list), len(source_list))
output_destinations_list = None
if output_destinations_list is None:
output_destinations_list = list(range(len(source_list)))
if processors == 0:
processors = psutil.cpu_count() - 1
log.info("Setting processor count to default: %s" % processors)
# TODO: [Better Sampling]
# check MongoDb to see if we have enough samples of the needed type, adjust the sampling
# noinspection PyTypeChecker
if desired_depth % processors != 0:
desired_depth = desired_depth // processors + 1
else:
desired_depth = desired_depth // processors
if p_value_cutoff < 0:
p_value_cutoff = p_val_cutoff
for hits_list, output_destination in zip(source_list, output_destinations_list):
log.info('Auto analyzing list of interest: %s', len(hits_list))
outputs_subdirs = NewOutputs(output_destination)
interactome_interface = get_interactome_interface(background_up_ids=background_list)
interactome_interface.set_uniprot_source(list(hits_list))
log.debug(" e_p_u_b_i length after UP_source was set: %s",
len(interactome_interface.active_up_sample))
if not skip_sampling:
log.info("spawning a sampler for %s proteins @ %s compops/sec",
len(interactome_interface.active_up_sample), estimated_comp_ops)
# dense analysis
if len(interactome_interface.active_up_sample) < sparse_analysis_threshold:
if not skip_sampling:
log.info('length: %s \t sampling depth: %s \t, estimated round time: %s min',
len(interactome_interface.active_up_sample),
'full',
len(interactome_interface.active_up_sample) ** 2 /
estimated_comp_ops / 60)
spawn_sampler_pool(
processors,
[len(interactome_interface.active_up_sample)],
[desired_depth],
background_set=background_list)
interactome_interface.compute_current_and_potentials()
nr_nodes, p_val_dict = compare_to_blank(
len(interactome_interface.active_up_sample),
interactome_interface,
p_val=p_value_cutoff,
output_destination=outputs_subdirs
)
# sparse analysis
else:
ceiling = min(205, len(interactome_interface.active_up_sample))
sampling_depth = max((ceiling - 5) ** 2 //
len(interactome_interface.active_up_sample),
5)
if not skip_sampling:
log.info('length: %s \t sampling depth: %s \t, estimated round time: %s min',
len(interactome_interface.active_up_sample),
sampling_depth,
len(interactome_interface.active_up_sample) *
sampling_depth / 2 / 60 / estimated_comp_ops)
spawn_sampler_pool(processors,
[len(interactome_interface.active_up_sample)],
[desired_depth],
sparse_rounds=sampling_depth,
background_set=background_list)
log.info('real run characteristics: sys_hash: %s, size: %s, sparse_rounds: %s' %
(interactome_interface.md5_hash(),
len(interactome_interface.active_up_sample), sampling_depth))
interactome_interface.compute_current_and_potentials(sparse_samples=sampling_depth)
nr_nodes, p_val_dict = compare_to_blank(
len(interactome_interface.active_up_sample),
interactome_interface,
p_val=p_value_cutoff,
sparse_rounds=sampling_depth,
output_destination=outputs_subdirs
)
interactome_interface.export_conduction_system(p_val_dict,
output_location=outputs_subdirs.Interactome_GDF_output)
# # old results print-out
# log.info('\t %s \t %s \t %s \t %s \t %s', 'node id',
# 'display name', 'info flow', 'degree', 'p value')
#
# for node in nr_nodes:
# log.info('\t %s \t %s \t %.3g \t %d \t %.3g', *node)
with open(outputs_subdirs.interactome_network_output, 'wt') as output:
writer = csv_writer(output, delimiter='\t')
writer.writerow(['node id', 'display name', 'info flow', 'degree', 'p value'])
for node in nr_nodes:
writer.writerow(node)
# using tabulate
headers = ['node id', 'display name', 'info flow', 'degree', 'p value']
print(tabulate(nr_nodes, headers, tablefmt='simple', floatfmt=".3g"))
if __name__ == "__main__":
# pprinter = PrettyPrinter(indent=4)
# background_set = MatrixGetter(True, False)
# background_set.fast_load()
# dumplist = undump_object(Dumps.RNA_seq_counts_compare)
# MG1.randomly_sample([150], [1], chromosome_specific=15, No_add=True)
# nr_nodes, nr_groups = compare_to_blanc(150, [0.5, 0.6], MG1, p_val=0.9)
# MG1.export_conduction_system()
# for group in nr_groups:
# print group
# for node in nr_nodes:
# print node
# source = get_source_bulbs_ids()
# background_list = get_background_bulbs_ids()
# auto_analyze([source], desired_depth=5, processors=6,
# background_list=background_list, skip_sampling=True)
local_matrix = InteractomeInterface()
local_matrix.fast_load()
# spawn_sampler_pool(3, [50], [3], background_set=None)
spawn_sampler(([50], [3], False, None, 0))
# local_matrix.randomly_sample([195], [10], sparse_rounds=195)
| bsd-3-clause | 3,543,690,738,254,189,000 | 40.261194 | 128 | 0.614532 | false |
mbylstra/django-wham | wham/fields.py | 1 | 3400 | from django.db import models
# the following will be required if we want to support south
# ----------------------------------------------------------------
# from south.modelsinspector import add_introspection_rules
#
# add_introspection_rules([], [
# "^wham\.models\.WhamCharField",
# "^wham\.models\.WhamTextField",
# "^wham\.models\.WhamIntegerField",
# "^wham\.models\.WhamFloatField",
# "^wham\.models\.WhamManyToManyField",
# "^wham\.models\.WhamDateField",
# "^wham\.models\.WhamDateTimeField",
# "^wham\.models\.WhamImageUrlField",
# ])
class WhamFieldMixin(object):
def __init__(self, *args, **kwargs):
self.wham_result_path = kwargs.pop('wham_result_path', None)
self.wham_can_lookup = kwargs.pop('wham_can_lookup', False)
self.wham_url_param = kwargs.pop('wham_url_param', None)
self.wham_detailed = kwargs.pop('wham_detailed', False)
return super(WhamFieldMixin, self).__init__(*args, **kwargs)
def get_result_path(self):
result_path = self.wham_result_path
if not result_path:
return (self.attname,)
else:
return result_path
def get_url_param(self):
return self.wham_url_param if self.wham_url_param else self.name
class WhamCharField(WhamFieldMixin, models.TextField):
@property
def type_repr(self):
return 'char'
class WhamTextField(WhamFieldMixin, models.TextField):
@property
def type_repr(self):
return 'text'
class WhamIntegerField(WhamFieldMixin, models.IntegerField):
@property
def type_repr(self):
return 'integer'
class WhamFloatField(WhamFieldMixin, models.FloatField):
@property
def type_repr(self):
return 'float'
class WhamDateField(WhamFieldMixin, models.DateField):
pass
class WhamDateTimeField(WhamFieldMixin, models.DateTimeField):
def __init__(self, *args, **kwargs):
self.wham_format = kwargs.pop('wham_format', None)
return super(WhamDateTimeField, self).__init__(*args, **kwargs)
class WhamManyToManyField(models.ManyToManyField):
def __init__(self, *args, **kwargs):
self.wham_result_path = kwargs.pop('wham_result_path', None)
self.wham_endpoint = kwargs.pop('wham_endpoint', None)
self.wham_results_path = kwargs.pop('wham_results_path', ())
self.wham_pk_param = kwargs.pop('wham_pk_param', None)
self.wham_params = kwargs.pop('wham_params', {})
return super(WhamManyToManyField, self).__init__(*args, **kwargs)
@property
def type_repr(self):
return 'many to many'
class WhamForeignKey(models.ForeignKey):
def __init__(self, *args, **kwargs):
self.wham_result_path = kwargs.pop('wham_result_path', None)
self.wham_endpoint = kwargs.pop('wham_endpoint', None)
self.wham_results_path = kwargs.pop('wham_results_path', ())
self.wham_pk_param = kwargs.pop('wham_pk_param', None)
self.wham_params = kwargs.pop('wham_params', {})
return super(WhamForeignKey, self).__init__(*args, **kwargs)
def get_result_path(self):
result_path = self.wham_result_path
if not result_path:
return (self.name,)
else:
return result_path
@property
def type_repr(self):
return 'foreign key'
class WhamImageUrlField(WhamTextField):
pass
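# Minimal usage sketch (illustrative only; the model and the wham_* values below
# are hypothetical and not part of this module). The wham_* keyword arguments are
# popped off in __init__ before the regular Django field constructor runs, so at
# the database level these behave like ordinary model fields:
#
# class Track(models.Model):  # in practice, a wham-aware model base class
#     id = WhamIntegerField(primary_key=True)
#     title = WhamCharField(wham_result_path=('name',))
#     duration = WhamIntegerField(wham_result_path=('duration_ms',))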
| mit | 8,305,047,103,216,442,000 | 29.088496 | 73 | 0.631765 | false |
wakiyamap/electrum-mona | electrum_mona/gui/qt/password_dialog.py | 1 | 11128 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2013 ecdsa@github
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import math
from functools import partial
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QLineEdit, QLabel, QGridLayout, QVBoxLayout, QCheckBox
from electrum_mona.i18n import _
from electrum_mona.plugin import run_hook
from .util import (icon_path, WindowModalDialog, OkButton, CancelButton, Buttons,
PasswordLineEdit)
def check_password_strength(password):
    '''
    Check the strength of the password entered by the user and return a rating.
    :param password: password entered by the user in the New Password field
    :return: password strength: Weak, Medium, Strong or Very Strong
    '''
n = math.log(len(set(password)))
num = re.search("[0-9]", password) is not None and re.match("^[0-9]*$", password) is None
caps = password != password.upper() and password != password.lower()
extra = re.match("^[a-zA-Z0-9]*$", password) is None
score = len(password)*(n + caps + num + extra)/20
password_strength = {0:"Weak",1:"Medium",2:"Strong",3:"Very Strong"}
return password_strength[min(3, int(score))]
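# Illustrative examples of the scoring above (values derived from the formula,
# not official test vectors):
#   check_password_strength("abc") -> "Weak"
#   check_password_strength("correct horse battery staple") -> "Very Strong"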
PW_NEW, PW_CHANGE, PW_PASSPHRASE = range(0, 3)
class PasswordLayout(object):
titles = [_("Enter Password"), _("Change Password"), _("Enter Passphrase")]
def __init__(self, msg, kind, OK_button, wallet=None, force_disable_encrypt_cb=False):
self.wallet = wallet
self.pw = PasswordLineEdit()
self.new_pw = PasswordLineEdit()
self.conf_pw = PasswordLineEdit()
self.kind = kind
self.OK_button = OK_button
vbox = QVBoxLayout()
label = QLabel(msg + "\n")
label.setWordWrap(True)
grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnMinimumWidth(0, 150)
grid.setColumnMinimumWidth(1, 100)
grid.setColumnStretch(1,1)
if kind == PW_PASSPHRASE:
vbox.addWidget(label)
msgs = [_('Passphrase:'), _('Confirm Passphrase:')]
else:
logo_grid = QGridLayout()
logo_grid.setSpacing(8)
logo_grid.setColumnMinimumWidth(0, 70)
logo_grid.setColumnStretch(1,1)
logo = QLabel()
logo.setAlignment(Qt.AlignCenter)
logo_grid.addWidget(logo, 0, 0)
logo_grid.addWidget(label, 0, 1, 1, 2)
vbox.addLayout(logo_grid)
m1 = _('New Password:') if kind == PW_CHANGE else _('Password:')
msgs = [m1, _('Confirm Password:')]
if wallet and wallet.has_password():
grid.addWidget(QLabel(_('Current Password:')), 0, 0)
grid.addWidget(self.pw, 0, 1)
lockfile = "lock.png"
else:
lockfile = "unlock.png"
logo.setPixmap(QPixmap(icon_path(lockfile))
.scaledToWidth(36, mode=Qt.SmoothTransformation))
grid.addWidget(QLabel(msgs[0]), 1, 0)
grid.addWidget(self.new_pw, 1, 1)
grid.addWidget(QLabel(msgs[1]), 2, 0)
grid.addWidget(self.conf_pw, 2, 1)
vbox.addLayout(grid)
# Password Strength Label
if kind != PW_PASSPHRASE:
self.pw_strength = QLabel()
grid.addWidget(self.pw_strength, 3, 0, 1, 2)
self.new_pw.textChanged.connect(self.pw_changed)
self.encrypt_cb = QCheckBox(_('Encrypt wallet file'))
self.encrypt_cb.setEnabled(False)
grid.addWidget(self.encrypt_cb, 4, 0, 1, 2)
if kind == PW_PASSPHRASE:
self.encrypt_cb.setVisible(False)
def enable_OK():
ok = self.new_pw.text() == self.conf_pw.text()
OK_button.setEnabled(ok)
self.encrypt_cb.setEnabled(ok and bool(self.new_pw.text())
and not force_disable_encrypt_cb)
self.new_pw.textChanged.connect(enable_OK)
self.conf_pw.textChanged.connect(enable_OK)
self.vbox = vbox
def title(self):
return self.titles[self.kind]
def layout(self):
return self.vbox
def pw_changed(self):
password = self.new_pw.text()
if password:
colors = {"Weak":"Red", "Medium":"Blue", "Strong":"Green",
"Very Strong":"Green"}
strength = check_password_strength(password)
label = (_("Password Strength") + ": " + "<font color="
+ colors[strength] + ">" + strength + "</font>")
else:
label = ""
self.pw_strength.setText(label)
def old_password(self):
if self.kind == PW_CHANGE:
return self.pw.text() or None
return None
def new_password(self):
pw = self.new_pw.text()
# Empty passphrases are fine and returned empty.
if pw == "" and self.kind != PW_PASSPHRASE:
pw = None
return pw
def clear_password_fields(self):
for field in [self.pw, self.new_pw, self.conf_pw]:
field.clear()
class PasswordLayoutForHW(object):
def __init__(self, msg, wallet=None):
self.wallet = wallet
vbox = QVBoxLayout()
label = QLabel(msg + "\n")
label.setWordWrap(True)
grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnMinimumWidth(0, 150)
grid.setColumnMinimumWidth(1, 100)
grid.setColumnStretch(1,1)
logo_grid = QGridLayout()
logo_grid.setSpacing(8)
logo_grid.setColumnMinimumWidth(0, 70)
logo_grid.setColumnStretch(1,1)
logo = QLabel()
logo.setAlignment(Qt.AlignCenter)
logo_grid.addWidget(logo, 0, 0)
logo_grid.addWidget(label, 0, 1, 1, 2)
vbox.addLayout(logo_grid)
if wallet and wallet.has_storage_encryption():
lockfile = "lock.png"
else:
lockfile = "unlock.png"
logo.setPixmap(QPixmap(icon_path(lockfile))
.scaledToWidth(36, mode=Qt.SmoothTransformation))
vbox.addLayout(grid)
self.encrypt_cb = QCheckBox(_('Encrypt wallet file'))
grid.addWidget(self.encrypt_cb, 1, 0, 1, 2)
self.vbox = vbox
def title(self):
return _("Toggle Encryption")
def layout(self):
return self.vbox
class ChangePasswordDialogBase(WindowModalDialog):
def __init__(self, parent, wallet):
WindowModalDialog.__init__(self, parent)
is_encrypted = wallet.has_storage_encryption()
OK_button = OkButton(self)
self.create_password_layout(wallet, is_encrypted, OK_button)
self.setWindowTitle(self.playout.title())
vbox = QVBoxLayout(self)
vbox.addLayout(self.playout.layout())
vbox.addStretch(1)
vbox.addLayout(Buttons(CancelButton(self), OK_button))
self.playout.encrypt_cb.setChecked(is_encrypted)
def create_password_layout(self, wallet, is_encrypted, OK_button):
raise NotImplementedError()
class ChangePasswordDialogForSW(ChangePasswordDialogBase):
def __init__(self, parent, wallet):
ChangePasswordDialogBase.__init__(self, parent, wallet)
if not wallet.has_password():
self.playout.encrypt_cb.setChecked(True)
def create_password_layout(self, wallet, is_encrypted, OK_button):
if not wallet.has_password():
msg = _('Your wallet is not protected.')
msg += ' ' + _('Use this dialog to add a password to your wallet.')
else:
if not is_encrypted:
msg = _('Your bitcoins are password protected. However, your wallet file is not encrypted.')
else:
msg = _('Your wallet is password protected and encrypted.')
msg += ' ' + _('Use this dialog to change your password.')
self.playout = PasswordLayout(msg=msg,
kind=PW_CHANGE,
OK_button=OK_button,
wallet=wallet,
force_disable_encrypt_cb=not wallet.can_have_keystore_encryption())
def run(self):
try:
if not self.exec_():
return False, None, None, None
return True, self.playout.old_password(), self.playout.new_password(), self.playout.encrypt_cb.isChecked()
finally:
self.playout.clear_password_fields()
class ChangePasswordDialogForHW(ChangePasswordDialogBase):
def __init__(self, parent, wallet):
ChangePasswordDialogBase.__init__(self, parent, wallet)
def create_password_layout(self, wallet, is_encrypted, OK_button):
if not is_encrypted:
msg = _('Your wallet file is NOT encrypted.')
else:
msg = _('Your wallet file is encrypted.')
msg += '\n' + _('Note: If you enable this setting, you will need your hardware device to open your wallet.')
msg += '\n' + _('Use this dialog to toggle encryption.')
self.playout = PasswordLayoutForHW(msg)
def run(self):
if not self.exec_():
return False, None
return True, self.playout.encrypt_cb.isChecked()
class PasswordDialog(WindowModalDialog):
def __init__(self, parent=None, msg=None):
msg = msg or _('Please enter your password')
WindowModalDialog.__init__(self, parent, _("Enter Password"))
self.pw = pw = PasswordLineEdit()
vbox = QVBoxLayout()
vbox.addWidget(QLabel(msg))
grid = QGridLayout()
grid.setSpacing(8)
grid.addWidget(QLabel(_('Password')), 1, 0)
grid.addWidget(pw, 1, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(self), OkButton(self)))
self.setLayout(vbox)
run_hook('password_dialog', pw, grid, 1)
def run(self):
try:
if not self.exec_():
return
return self.pw.text()
finally:
self.pw.clear()
| mit | 7,213,216,465,206,087,000 | 33.993711 | 118 | 0.604421 | false |
rhyolight/nupic.research | htmresearch/frameworks/pytorch/sparse_speech_experiment.py | 1 | 17584 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from __future__ import print_function
import os
import time
import torch.nn.functional as F
import torch.optim as optim
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.data.sampler import WeightedRandomSampler
from htmresearch.frameworks.pytorch.benchmark_utils import (
register_nonzero_counter, unregister_counter_nonzero)
from htmresearch.support.expsuite import PyExperimentSuite
from htmresearch.frameworks.pytorch.sparse_net import SparseNet
from htmresearch.frameworks.pytorch.duty_cycle_metrics import plotDutyCycles
from htmresearch.frameworks.pytorch.speech_commands_dataset import (
SpeechCommandsDataset, BackgroundNoiseDataset
)
from htmresearch.frameworks.pytorch.audio_transforms import *
from htmresearch.frameworks.pytorch.resnet_models import resnet9
class SparseSpeechExperiment(PyExperimentSuite):
"""
This experiment tests the Google Speech Commands dataset, available here:
http://download.tensorflow.org/data/speech_commands_v0.01.tar
Allows running multiple sparse speech experiments in parallel
"""
def parse_cfg(self):
super(SparseSpeechExperiment, self).parse_cfg()
# Change the current working directory to be relative to 'experiments.cfg'
projectDir = os.path.dirname(self.options.config)
projectDir = os.path.abspath(projectDir)
os.chdir(projectDir)
def reset(self, params, repetition):
"""
Called once at the beginning of each experiment.
"""
self.startTime = time.time()
print(params)
torch.manual_seed(params["seed"] + repetition)
np.random.seed(params["seed"] + repetition)
# Get our directories correct
self.dataDir = os.path.join(params["datadir"], "speech_commands")
self.resultsDir = os.path.join(params["path"], params["name"], "plots")
if not os.path.exists(self.resultsDir):
os.makedirs(self.resultsDir)
self.use_cuda = not params["no_cuda"] and torch.cuda.is_available()
if self.use_cuda:
print("*********using cuda!")
self.device = torch.device("cuda" if self.use_cuda else "cpu")
self.loadDatasets(params)
# Parse 'n' and 'k' parameters
n = params["n"]
k = params["k"]
if isinstance(n, basestring):
n = map(int, n.split("_"))
if isinstance(k, basestring):
k = map(int, k.split("_"))
if params["model_type"] == "cnn":
c1_out_channels = params["c1_out_channels"]
c1_k = params["c1_k"]
if isinstance(c1_out_channels, basestring):
c1_out_channels = map(int, c1_out_channels.split("_"))
if isinstance(c1_k, basestring):
c1_k = map(int, c1_k.split("_"))
sp_model = SparseNet(
inputSize=params.get("c1_input_shape", (1, 32, 32)),
outputSize=len(self.train_loader.dataset.classes),
outChannels=c1_out_channels,
c_k=c1_k,
dropout=params["dropout"],
n=n,
k=k,
boostStrength=params["boost_strength"],
weightSparsity=params["weight_sparsity"],
boostStrengthFactor=params["boost_strength_factor"],
kInferenceFactor=params["k_inference_factor"],
useBatchNorm=params["use_batch_norm"],
)
print("c1OutputLength=", sp_model.cnnSdr[0].outputLength)
elif params["model_type"] == "resnet9":
sp_model = resnet9(num_classes=len(self.train_loader.dataset.classes),
in_channels=1)
elif params["model_type"] == "linear":
sp_model = SparseNet(
n=n,
k=k,
inputSize=32*32,
outputSize=len(self.train_loader.dataset.classes),
boostStrength=params["boost_strength"],
weightSparsity=params["weight_sparsity"],
boostStrengthFactor=params["boost_strength_factor"],
kInferenceFactor=params["k_inference_factor"],
dropout=params["dropout"],
useBatchNorm=params["use_batch_norm"],
)
else:
raise RuntimeError("Unknown model type")
if torch.cuda.device_count() > 1:
print("Using", torch.cuda.device_count(), "GPUs")
sp_model = torch.nn.DataParallel(sp_model)
self.model = sp_model.to(self.device)
self.learningRate = params["learning_rate"]
self.optimizer = self.createOptimizer(params, self.model)
self.lr_scheduler = self.createLearningRateScheduler(params, self.optimizer)
def iterate(self, params, repetition, iteration):
"""
Called once for each training iteration (== epoch here).
"""
print("\nStarting iteration",iteration)
print("Learning rate:", self.learningRate if self.lr_scheduler is None
else self.lr_scheduler.get_lr())
t1 = time.time()
ret = {}
# Update learning rate using learning rate scheduler if configured
if self.lr_scheduler is not None:
# ReduceLROnPlateau lr_scheduler step should be called after validation,
# all other lr_schedulers should be called before training
if params["lr_scheduler"] != "ReduceLROnPlateau":
self.lr_scheduler.step()
self.train(params, epoch=iteration)
# Run validation test
if self.validation_loader is not None:
validation = self.test(params, self.validation_loader)
# ReduceLROnPlateau step should be called after validation
if params["lr_scheduler"] == "ReduceLROnPlateau":
self.lr_scheduler.step(validation["test_loss"])
ret["validation"] = validation
print("Validation: error=", validation["testerror"],
"entropy=", validation["entropy"],
"loss=", validation["test_loss"])
ret.update({"validationerror": validation["testerror"]})
# Run test set
if self.test_loader is not None:
testResults = self.test(params, self.test_loader)
ret["testResults"] = testResults
print("Test: error=", testResults["testerror"],
"entropy=", testResults["entropy"],
"loss=", testResults["test_loss"])
ret.update({"testerror": testResults["testerror"]})
# Run bg noise set
if self.bg_noise_loader is not None:
bgResults = self.test(params, self.bg_noise_loader)
ret["bgResults"] = bgResults
print("BG noise error=", bgResults["testerror"])
ret.update({"bgerror": bgResults["testerror"]})
ret.update({"elapsedTime": time.time() - self.startTime})
ret.update({"learningRate": self.learningRate if self.lr_scheduler is None
else self.lr_scheduler.get_lr()})
# Run noise set
ret.update(self.runNoiseTests(params))
print("Noise test results: totalCorrect=", ret["totalCorrect"],
"Test error=", ret["testerror"], ", entropy=", ret["entropy"])
ret.update({"elapsedTime": time.time() - self.startTime})
ret.update({"learningRate": self.learningRate if self.lr_scheduler is None
else self.lr_scheduler.get_lr()})
print("Iteration time= {0:.3f} secs, "
"total elapsed time= {1:.3f} mins".format(
time.time() - t1,ret["elapsedTime"]/60.0))
return ret
def finalize(self, params, rep):
"""
Save the full model once we are done.
"""
if params.get("saveNet", True):
saveDir = os.path.join(params["path"], params["name"], "model.pt")
torch.save(self.model, saveDir)
def createLearningRateScheduler(self, params, optimizer):
"""
    Creates the learning rate scheduler and attaches it to the given optimizer
"""
lr_scheduler = params.get("lr_scheduler", None)
if lr_scheduler is None:
return None
if lr_scheduler == "StepLR":
lr_scheduler_params = "{'step_size': 1, 'gamma':" + str(params["learning_rate_factor"]) + "}"
else:
lr_scheduler_params = params.get("lr_scheduler_params", None)
if lr_scheduler_params is None:
raise ValueError("Missing 'lr_scheduler_params' for {}".format(lr_scheduler))
# Get lr_scheduler class by name
clazz = eval("torch.optim.lr_scheduler.{}".format(lr_scheduler))
# Parse scheduler parameters from config
lr_scheduler_params = eval(lr_scheduler_params)
return clazz(optimizer, **lr_scheduler_params)
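  # Illustrative configuration for this method (hypothetical values, not taken
  # from experiments.cfg):
  #   lr_scheduler = ReduceLROnPlateau
  #   lr_scheduler_params = "{'mode': 'min', 'factor': 0.5, 'patience': 2}"
  # The class name is resolved as torch.optim.lr_scheduler.ReduceLROnPlateau and
  # the eval'd params dict is passed straight through to its constructor.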
def createOptimizer(self, params, model):
"""
Create a new instance of the optimizer
"""
lr = params["learning_rate"]
print("Creating optimizer with learning rate=", lr)
if params["optimizer"] == "SGD":
optimizer = optim.SGD(model.parameters(), lr=lr,
momentum=params["momentum"],
weight_decay=params["weight_decay"],
)
elif params["optimizer"] == "Adam":
optimizer = optim.Adam(model.parameters(), lr=lr)
else:
raise LookupError("Incorrect optimizer value")
return optimizer
def train(self, params, epoch):
"""
Train one epoch of this model by iterating through mini batches. An epoch
ends after one pass through the training set, or if the number of mini
batches exceeds the parameter "batches_in_epoch".
"""
self.model.train()
for batch_idx, batch in enumerate(self.train_loader):
data = batch["input"]
if params["model_type"] in ["resnet9", "cnn"]:
data = torch.unsqueeze(data, 1)
target = batch["target"]
data, target = data.to(self.device), target.to(self.device)
self.optimizer.zero_grad()
output = self.model(data)
loss = F.nll_loss(output, target)
loss.backward()
self.optimizer.step()
# Log info every log_interval mini batches
if batch_idx % params["log_interval"] == 0:
entropy = self.model.entropy()
print(
"logging: ",self.model.getLearningIterations(),
" learning iterations, elapsedTime", time.time() - self.startTime,
" entropy:", float(entropy)," / ", self.model.maxEntropy(),
"loss:", loss.item())
if params["create_plots"]:
plotDutyCycles(self.model.dutyCycle,
self.resultsDir + "/figure_"+str(epoch)+"_"+str(
self.model.getLearningIterations()))
if batch_idx >= params["batches_in_epoch"]:
break
self.model.postEpoch()
def test(self, params, test_loader):
"""
Test the model using the given loader and return test metrics
"""
self.model.eval()
test_loss = 0
correct = 0
nonzeros = None
count_nonzeros = params.get("count_nonzeros", False)
if count_nonzeros:
nonzeros = {}
register_nonzero_counter(self.model, nonzeros)
with torch.no_grad():
for batch in test_loader:
data = batch["input"]
if params["model_type"] in ["resnet9", "cnn"]:
data = torch.unsqueeze(data, 1)
target = batch["target"]
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item()
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).sum().item()
# count nonzeros only once
if count_nonzeros:
count_nonzeros = False
unregister_counter_nonzero(self.model)
test_loss /= len(test_loader.sampler)
test_error = 100. * correct / len(test_loader.sampler)
entropy = self.model.entropy()
ret = {"num_correct": correct,
"test_loss": test_loss,
"testerror": test_error,
"entropy": float(entropy)}
if nonzeros is not None:
ret["nonzeros"] = nonzeros
return ret
def runNoiseTests(self, params):
"""
Test the model with different noise values and return test metrics.
"""
ret = {}
testDataDir = os.path.join(self.dataDir, "test")
n_mels = 32
# Test with noise
total_correct = 0
for noise in [0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5]:
# Create noise dataset with noise transform
noiseTransform = transforms.Compose([
FixAudioLength(),
AddNoise(noise),
ToSTFT(),
ToMelSpectrogramFromSTFT(n_mels=n_mels),
DeleteSTFT(),
ToTensor('mel_spectrogram', 'input')
])
noiseDataset = SpeechCommandsDataset(
testDataDir,
noiseTransform,
silence_percentage=0,
)
noise_loader = DataLoader(noiseDataset,
batch_size=params["batch_size"],
sampler=None,
shuffle=False,
pin_memory=self.use_cuda,
)
testResult = self.test(params, noise_loader)
total_correct += testResult["num_correct"]
ret[noise]= testResult
ret["totalCorrect"] = total_correct
ret["testerror"] = ret[0.0]["testerror"]
ret["entropy"] = ret[0.0]["entropy"]
if "nonzeros" in ret[0.0]:
ret["nonzeros"] = ret[0.0]["nonzeros"]
return ret
def loadDatasets(self, params):
"""
The GSC dataset specifies specific files to be used as training, test,
and validation. We assume the data has already been processed according
to those files into separate train, test, and valid directories.
For our experiment we use a subset of the data (10 categories out of 30),
just like the Kaggle competition.
"""
n_mels = 32
trainDataDir = os.path.join(self.dataDir, "train")
testDataDir = os.path.join(self.dataDir, "test")
validationDataDir = os.path.join(self.dataDir, "valid")
backgroundNoiseDir = os.path.join(self.dataDir,
params["background_noise_dir"])
dataAugmentationTransform = transforms.Compose([
ChangeAmplitude(),
ChangeSpeedAndPitchAudio(),
FixAudioLength(),
ToSTFT(),
StretchAudioOnSTFT(),
TimeshiftAudioOnSTFT(),
FixSTFTDimension(),
])
featureTransform = transforms.Compose(
[
ToMelSpectrogramFromSTFT(n_mels=n_mels),
DeleteSTFT(),
ToTensor('mel_spectrogram', 'input')
])
trainDataset = SpeechCommandsDataset(
trainDataDir,
transforms.Compose([
dataAugmentationTransform,
# add_bg_noise, # Uncomment to allow adding BG noise
# during training
featureTransform
]))
testFeatureTransform = transforms.Compose([
FixAudioLength(),
ToMelSpectrogram(n_mels=n_mels),
ToTensor('mel_spectrogram', 'input')
])
validationDataset = SpeechCommandsDataset(
validationDataDir,
testFeatureTransform,
silence_percentage=0,
)
testDataset = SpeechCommandsDataset(
testDataDir,
testFeatureTransform,
silence_percentage=0,
)
weights = trainDataset.make_weights_for_balanced_classes()
sampler = WeightedRandomSampler(weights, len(weights))
# print("Number of training samples=",len(trainDataset))
# print("Number of validation samples=",len(validationDataset))
# print("Number of test samples=",len(testDataset))
self.train_loader = DataLoader(trainDataset,
batch_size=params["batch_size"],
sampler=sampler
)
self.validation_loader = DataLoader(validationDataset,
batch_size=params["batch_size"],
shuffle=False
)
self.test_loader = DataLoader(testDataset,
batch_size=params["batch_size"],
sampler=None,
shuffle=False
)
bg_dataset = BackgroundNoiseDataset(
backgroundNoiseDir,
transforms.Compose([FixAudioLength(), ToSTFT()]),
)
bgNoiseTransform = transforms.Compose([
FixAudioLength(),
ToSTFT(),
AddBackgroundNoiseOnSTFT(bg_dataset),
ToMelSpectrogramFromSTFT(n_mels=n_mels),
DeleteSTFT(),
ToTensor('mel_spectrogram', 'input')
])
bgNoiseDataset = SpeechCommandsDataset(
testDataDir,
bgNoiseTransform,
silence_percentage=0,
)
self.bg_noise_loader = DataLoader(bgNoiseDataset,
batch_size=params["batch_size"],
sampler=None,
shuffle=False
)
if __name__ == '__main__':
suite = SparseSpeechExperiment()
suite.start()
| gpl-3.0 | -2,359,830,727,427,281,000 | 33.011605 | 99 | 0.615446 | false |
boudewijnrempt/HyvesDesktop | 3rdparty/socorro/socorro/database/schema.py | 1 | 70902 | import psycopg2 as pg
import datetime as dt
import threading
import sets
import socorro.lib.prioritize as socorro_pri
import socorro.lib.psycopghelper as socorro_psy
import socorro.database.postgresql as socorro_pg
import socorro.lib.util as socorro_util
"""
Schema.py contains several utility functions and the code which describes all the database tables used by socorro. It has
a test file: ../unittest/database/testSchema.py which MUST BE CHANGED IN PARALLEL to changes in schema.py. In particular,
if you add, remove or rename any of the XxxTable classes in this file, you must make a parallel change to the list
hardCodedSchemaClasses in the test file.
"""
#-----------------------------------------------------------------------------------------------------------------
def mondayPairsIteratorFactory(minDate, maxDate):
"""
Given a pair of dates, creates iterator that returns (aMonday,theNextMonday) such that
- the first returned pair defines an interval holding minDate
- the last returned pair defines an interval holding maxDate
if minDate or maxDate are not instances of datetime.date, raises TypeError
  if minDate > maxDate, raises ValueError
"""
if not (isinstance(minDate,dt.date) and isinstance(maxDate,dt.date)):
raise TypeError("minDate and maxDate must be instances of datetime.date")
if maxDate < minDate:
raise ValueError("minDate must be <= maxDate")
def anIterator():
oneWeek = dt.timedelta(7)
    aDate = minDate - dt.timedelta(minDate.weekday()) # begin on the Monday on or before minDate
while aDate <= maxDate:
nextMonday = aDate + oneWeek
yield (aDate, nextMonday)
aDate = nextMonday
return anIterator()
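# Illustrative example of the iterator's behaviour (dates chosen arbitrarily):
#   list(mondayPairsIteratorFactory(dt.date(2009, 1, 7), dt.date(2009, 1, 13)))
#   == [(dt.date(2009, 1, 5), dt.date(2009, 1, 12)),
#       (dt.date(2009, 1, 12), dt.date(2009, 1, 19))]
# 2009-01-07 falls in the week starting Monday 2009-01-05 and 2009-01-13 falls in
# the following week, so two Monday-to-Monday pairs are produced.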
#-----------------------------------------------------------------------------------------------------------------
# For each database TableClass below,
# databaseDependenciesForSetup[TableClass] = [List of TableClasses on which this TableClass depends]
# NOTE: This requires that new Tables be added textually below every Table on which they depend
databaseDependenciesForSetup = {}
def getOrderedSetupList(whichTables = None):
"""
A helper function to get the correct order to create tables during setup.
whichTables is a list of Tables, possibly empty, or None
If not whichTables, then all the known tables are visited
"""
# if whichTables is None, then databaseDependenciesForSetup.keys() is used
return socorro_pri.dependencyOrder(databaseDependenciesForSetup,whichTables)
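# Illustrative example (with hypothetical table classes A and B, not defined
# here): if databaseDependenciesForSetup were {B: [A], A: []}, then
# getOrderedSetupList([B]) would return [A, B], i.e. a table's dependencies are
# always created before the table that references them.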
databaseDependenciesForPartition = {}
def getOrderedPartitionList(whichTables):
"""
A helper function to get the needed PartionedTables for a given set of PartitionedTables
"""
if not whichTables:
return []
order = socorro_pri.dependencyOrder(databaseDependenciesForPartition,whichTables)
return order
# This set caches knowledge of existing partition tables to avoid hitting database. Beware cache incoherence
partitionCreationHistory = set()
#-----------------------------------------------------------------------------------------------------------------
def partitionWasCreated(partitionTableName):
"""Helper function to examine partitionCreationHistory"""
return partitionTableName in partitionCreationHistory
#-----------------------------------------------------------------------------------------------------------------
def markPartitionCreated(partitionTableName):
"""Helper function to update partitionCreationHistory"""
global partitionCreationHistory
partitionCreationHistory.add(partitionTableName)
#=================================================================================================================
class PartitionControlParameterRequired(Exception):
def __init__(self):
    super(PartitionControlParameterRequired, self).__init__("No partition control parameter was supplied")
#=================================================================================================================
class DatabaseObject(object):
"""
Base class for all objects (Tables, Constraints, Indexes) that may be individually created and used in the database
Classes that inherit DatabaseObject:
- Must supply appropriate creationSql parameter to the superclass constructor
- May override method additionalCreationProcedure(self,aDatabaseCursor). If this is provided, it is
called after creationSql is executed in method create(self,aDatabaseCursor)
The cursor's connection is neither committed nor rolled back during the call to create
- May override methods which do nothing in this class:
= drop(self,aDatabaseCursor)
= updateDefinition(self,aDatabaseCursor)
= createPartitions(self,aDatabaseCursor,aPartitionDetailsIterator)
Every leaf class that inherits DatabaseObject should be aware of the module-level dictionary: databaseDependenciesForSetup.
If that leaf class should be created when the database is being set up, the class itself must be added as a key in the
databaseDependenciesForSetup dictionary. The value associated with that key is a possibly empty iterable containing the
classes on which the particular leaf class depends: Those that must already be created before the particular instance is
created. This is often because the particular table has one or more foreign keys referencing tables upon which it depends.
"""
#-----------------------------------------------------------------------------------------------------------------
def __init__(self, name=None, logger=None, creationSql=None, **kwargs):
super(DatabaseObject, self).__init__()
self.name = name
self.creationSql = creationSql
self.logger = logger
#-----------------------------------------------------------------------------------------------------------------
def _createSelf(self,databaseCursor):
databaseCursor.execute(self.creationSql)
self.additionalCreationProcedures(databaseCursor)
#-----------------------------------------------------------------------------------------------------------------
def create(self, databaseCursor):
orderedTableList = getOrderedSetupList([self.__class__])
for tableClass in orderedTableList:
tableObject = self
if not self.__class__ == tableClass:
tableObject = tableClass(logger = self.logger)
databaseCursor.execute("savepoint creating_%s"%tableObject.name)
try:
tableObject._createSelf(databaseCursor)
databaseCursor.execute("release savepoint creating_%s"%tableObject.name)
except pg.ProgrammingError,x:
databaseCursor.execute("rollback to creating_%s"%tableObject.name)
databaseCursor.connection.commit()
self.logger.debug("%s - in create for %s, table %s exists",threading.currentThread().getName(),self.name,tableObject.name)
#-----------------------------------------------------------------------------------------------------------------
def additionalCreationProcedures(self, databaseCursor):
pass
#-----------------------------------------------------------------------------------------------------------------
def updateDefinition(self, databaseCursor):
pass
#-----------------------------------------------------------------------------------------------------------------
def drop(self, databaseCursor):
pass
#-----------------------------------------------------------------------------------------------------------------
def createPartitions(self, databaseCursor, iterator):
pass
#=================================================================================================================
class Table (DatabaseObject):
"""
Base class for all Table objects that may be created and used in the database.
Classes that inherit DatabaseObject:
- Must supply appropriate creationSql parameter to the superclass constructor
- May override method insert(self,rowTuple, **kwargs) to do the right thing during an insert
- May provide method alterColumnDefinitions(self,aDatabaseCursor,tableName)
- May provide method updateDefinition(self,aDatabaseCursor)
- Must be aware of databaseDependenciesForSetup and how it is used
class Table inherits method create from DatabaseObject
class Table provides a reasonable implementation of method drop, overriding the empty one in DatabaseObject
"""
#-----------------------------------------------------------------------------------------------------------------
def __init__(self, name=None, logger=None, creationSql=None, **kwargs):
super(Table, self).__init__(name=name, logger=logger, creationSql=creationSql, **kwargs)
#-----------------------------------------------------------------------------------------------------------------
def drop(self, databaseCursor):
databaseCursor.execute("drop table if exists %s cascade" % self.name)
#-----------------------------------------------------------------------------------------------------------------
def insert(self, rowTuple=None, **kwargs):
pass
#=================================================================================================================
class PartitionedTable(Table):
"""
Base class for Tables that will be partitioned or are likely to be programmatically altered.
Classes that inherit PartitionedTable
- Must supply self.insertSql with 'TABLENAME' replacing the actual table name
- Must supply appropriate creationSql and partitionCreationSqlTemplate to the superclass constructor
- Should NOT override method insert, which does something special for PartitionedTables
- May override method partitionCreationParameters(self, partitionDetails) which returns a dictionary suitable for string formatting
Every leaf class that inherits PartitionedTable should be aware of the module-level dictionary: databaseDependenciesForPartition
If that leaf class has a partition that depends upon some other partition, then it must be added as a key to the dictionary
databaseDependenciesForPartition. The value associated with that key is an iterable containing the classes that define the partitions
on which this particular leaf class depends: Those that must already be created before the particular instance is created. This is
most often because the particular partition table has one or more foreign keys referencing partition tables upon which it depends.
"""
#-----------------------------------------------------------------------------------------------------------------
partitionCreationLock = threading.RLock()
def __init__ (self, name=None, logger=None, creationSql=None, partitionNameTemplate='%s', partitionCreationSqlTemplate='', weekInterval=None, **kwargs):
super(PartitionedTable, self).__init__(name=name, logger=logger, creationSql=creationSql)
self.partitionNameTemplate = partitionNameTemplate
self.partitionCreationSqlTemplate = partitionCreationSqlTemplate
self.weekInterval = weekInterval
if not weekInterval:
today = dt.date.today()
self.weekInterval = mondayPairsIteratorFactory(today,today)
self.insertSql = None
#-----------------------------------------------------------------------------------------------------------------
#def additionalCreationProcedures(self, databaseCursor):
#self.createPartitions(databaseCursor, self.weekInterval)
#-----------------------------------------------------------------------------------------------------------------
def _createOwnPartition(self, databaseCursor, uniqueItems):
"""
Internal method that assumes all precursor partitions are already in place before creating this one. Called
from createPartitions(same parameters) to avoid bottomless recursion. Creates one or more partitions for
this particular table, (more if uniqueItems has more than one element)
side effect: Cursor's connection has been committed() by the time we return
"""
self.logger.debug("%s - in createOwnPartition for %s",threading.currentThread().getName(),self.name)
for x in uniqueItems:
#self.logger.debug("DEBUG - item value is %s",x)
partitionCreationParameters = self.partitionCreationParameters(x)
partitionName = self.partitionNameTemplate % partitionCreationParameters["partitionName"]
if partitionWasCreated(partitionName):
#self.logger.debug("DEBUG - skipping creation of %s",partitionName)
continue
partitionCreationSql = self.partitionCreationSqlTemplate % partitionCreationParameters
#self.logger.debug("%s - Sql for %s is %s",threading.currentThread().getName(),self.name,partitionCreationSql)
aPartition = Table(name=partitionName, logger=self.logger, creationSql=partitionCreationSql)
self.logger.debug("%s - savepoint createPartitions_%s",threading.currentThread().getName(), partitionName)
databaseCursor.execute("savepoint createPartitions_%s" % partitionName)
try:
self.logger.debug("%s - creating %s", threading.currentThread().getName(), partitionName)
aPartition._createSelf(databaseCursor)
markPartitionCreated(partitionName)
self.logger.debug("%s - successful - releasing savepoint", threading.currentThread().getName())
databaseCursor.execute("release savepoint createPartitions_%s" % partitionName)
except pg.ProgrammingError, x:
self.logger.debug("%s -- Rolling back and releasing savepoint: Creating %s failed in createPartitions: %s", threading.currentThread().getName(), partitionName, str(x).strip())
databaseCursor.execute("rollback to createPartitions_%s; release savepoint createPartitions_%s;" % (partitionName, partitionName))
databaseCursor.connection.commit()
#-----------------------------------------------------------------------------------------------------------------
def createPartitions(self, databaseCursor, iterator):
"""
Create this table's partition(s) and all the precursor partition(s) needed to support this one
databaseCursor: as always
iterator: Supplies at least one unique identifier (a date). If more than one then more than one (family of)
partition(s) is created
side effects: The cursor's connection will be rolled back or committed by the end of this method
"""
self.logger.debug("%s - in createPartitions", threading.currentThread().getName())
partitionTableClasses = getOrderedPartitionList([self.__class__])
#self.logger.debug("DEBUG - Classes are %s",partitionTableClasses)
uniqueItems = [x for x in iterator]
for tableClass in partitionTableClasses:
tableObject = self
if not self.__class__ == tableClass:
tableObject = tableClass(logger = self.logger)
#self.logger.debug("DEBUG - Handling %s /w/ sql %s",tableObject.name,tableObject.partitionCreationSqlTemplate)
tableObject._createOwnPartition(databaseCursor,uniqueItems)
#-----------------------------------------------------------------------------------------------------------------
def partitionCreationParameters(self,partitioningData):
"""returns: a dictionary of string substitution parameters"""
return {}
#-----------------------------------------------------------------------------------------------------------------
def updateColumnDefinitions(self, databaseCursor):
childTableList = socorro_pg.childTablesForTable(self.name, databaseCursor)
for aChildTableName in childTableList:
databaseCursor.execute("alter table %s no inherit %s", (aTable, aChildTableName))
self.alterColumnDefinitions(databaseCursor, self.name)
for aChildTableName in childTableList:
self.alterColumnDefinitions(databaseCursor, aChildTableName)
for aChildTableName in childTableList:
databaseCursor.execute("alter table %s inherit %s", (aTable, aChildTableName))
#-----------------------------------------------------------------------------------------------------------------
def insert(self, databaseCursor, row, alternateCursorFunction, **kwargs):
try:
uniqueIdentifier = kwargs["date_processed"]
except KeyError:
raise PartitionControlParameterRequired()
    dateRangeTuple = mondayPairsIteratorFactory(uniqueIdentifier, uniqueIdentifier).next()  # create iterator and throw away
partitionName = self.partitionCreationParameters(dateRangeTuple)["partitionName"]
insertSql = self.insertSql.replace('TABLENAME', partitionName)
try:
databaseCursor.execute("savepoint %s" % partitionName)
#self.logger.debug("%s - Trying to insert into %s", threading.currentThread().getName(), self.name)
databaseCursor.execute(insertSql, row)
databaseCursor.execute("release savepoint %s" % partitionName)
except pg.ProgrammingError, x:
self.logger.debug('%s - Rolling back and releasing savepoint: failed: %s', threading.currentThread().getName(), str(x).strip())
databaseCursor.execute("rollback to %s; release savepoint %s;" % (partitionName, partitionName))
      databaseCursor.connection.commit() # This line added after hours of blood, sweat, and tears. Remove only per deathwish.
altConnection, altCursor = alternateCursorFunction()
dateIterator = mondayPairsIteratorFactory(uniqueIdentifier, uniqueIdentifier)
try:
self.createPartitions(altCursor,dateIterator)
except pg.DatabaseError,x:
self.logger.debug("%s - Failed to create partition(s) %s: %s:%s", threading.currentThread().getName(), partitionName, type(x), x)
self.logger.debug("%s - trying to insert into %s for the second time", threading.currentThread().getName(), self.name)
databaseCursor.execute(insertSql, row)
#=================================================================================================================
class BranchesTable(Table):
"""Define the table 'branches'"""
#-----------------------------------------------------------------------------------------------------------------
def __init__ (self, logger, **kwargs):
super(BranchesTable, self).__init__(name="branches", logger=logger,
creationSql = """
CREATE TABLE branches (
product character varying(30) NOT NULL,
version character varying(16) NOT NULL,
branch character varying(24) NOT NULL,
PRIMARY KEY (product, version)
);""")
databaseDependenciesForSetup[BranchesTable] = []
#=================================================================================================================
class ReportsTable(PartitionedTable):
"""Define the table 'reports'"""
#-----------------------------------------------------------------------------------------------------------------
def __init__ (self, logger, **kwargs):
super(ReportsTable, self).__init__(name='reports', logger=logger,
creationSql="""
CREATE TABLE reports (
id serial NOT NULL,
client_crash_date timestamp with time zone,
date_processed timestamp without time zone,
uuid character varying(50) NOT NULL,
product character varying(30),
version character varying(16),
build character varying(30),
signature character varying(255),
url character varying(255),
install_age integer,
last_crash integer,
uptime integer,
cpu_name character varying(100),
cpu_info character varying(100),
reason character varying(255),
address character varying(20),
os_name character varying(100),
os_version character varying(100),
email character varying(100),
build_date timestamp without time zone,
user_id character varying(50),
started_datetime timestamp without time zone,
completed_datetime timestamp without time zone,
success boolean,
truncated boolean,
processor_notes text,
user_comments character varying(1024),
app_notes character varying(1024),
distributor character varying(20),
distributor_version character varying(20),
logfile text
);
--CREATE TRIGGER reports_insert_trigger
-- BEFORE INSERT ON reports
-- FOR EACH ROW EXECUTE PROCEDURE partition_insert_trigger();""",
partitionCreationSqlTemplate="""
CREATE TABLE %(partitionName)s (
CONSTRAINT %(partitionName)s_date_check CHECK (TIMESTAMP without time zone '%(startDate)s' <= date_processed and date_processed < TIMESTAMP without time zone '%(endDate)s'),
CONSTRAINT %(partitionName)s_unique_uuid unique (uuid),
PRIMARY KEY(id)
)
INHERITS (reports);
CREATE INDEX %(partitionName)s_date_processed_key ON %(partitionName)s (date_processed);
CREATE INDEX %(partitionName)s_uuid_key ON %(partitionName)s (uuid);
CREATE INDEX %(partitionName)s_signature_key ON %(partitionName)s (signature);
CREATE INDEX %(partitionName)s_url_key ON %(partitionName)s (url);
CREATE INDEX %(partitionName)s_product_version_key ON %(partitionName)s (product, version);
--CREATE INDEX %(partitionName)s_uuid_date_processed_key ON %(partitionName)s (uuid, date_processed);
CREATE INDEX %(partitionName)s_signature_date_processed_key ON %(partitionName)s (signature, date_processed);
"""
)
self.insertSql = """insert into TABLENAME
(uuid, client_crash_date, date_processed, product, version, build, url, install_age, last_crash, uptime, email, build_date, user_id, user_comments, app_notes, distributor, distributor_version, logfile) values
(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
#-----------------------------------------------------------------------------------------------------------------
def additionalCreationProcedures(self, databaseCursor):
pass
#-----------------------------------------------------------------------------------------------------------------
def partitionCreationParameters(self, uniqueIdentifier):
startDate, endDate = uniqueIdentifier
startDateAsString = "%4d-%02d-%02d" % startDate.timetuple()[:3]
compressedStartDateAsString = startDateAsString.replace("-", "")
endDateAsString = "%4d-%02d-%02d" % endDate.timetuple()[:3]
return { "partitionName": "reports_%s" % compressedStartDateAsString,
"startDate": startDateAsString,
"endDate": endDateAsString,
"compressedStartDate": compressedStartDateAsString
}
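  # Illustrative example: for the week pair (2009-01-05, 2009-01-12) this returns
  # {"partitionName": "reports_20090105", "startDate": "2009-01-05",
  #  "endDate": "2009-01-12", "compressedStartDate": "20090105"}, which is then
  # used to fill in the partition DDL template above.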
#-----------------------------------------------------------------------------------------------------------------
def alterColumnDefinitions(self, databaseCursor, tableName):
columnNameTypeDictionary = socorro_pg.columnNameTypeDictionaryForTable(tableName, databaseCursor)
#if 'user_comments' not in columnNameTypeDictionary:
#databaseCursor.execute("""ALTER TABLE %s rename column comments to user_comments""" % tableName)
#if 'client_crash_date' not in columnNameTypeDictionary:
#databaseCursor.execute("""ALTER TABLE %s rename column date to client_crash_date""" % tableName)
#if 'app_notes' not in columnNameTypeDictionary:
#databaseCursor.execute("""ALTER TABLE %s ADD COLUMN app_notes character varying(1024)""" % tableName)
#if 'distributor' not in columnNameTypeDictionary:
#databaseCursor.execute("""ALTER TABLE %s ADD COLUMN distributor character varying(20)""" % tableName)
#if 'distributor_version' not in columnNameTypeDictionary:
#databaseCursor.execute("""ALTER TABLE %s ADD COLUMN distributor_version character varying(20)""" % tableName)
#if 'message' in columnNameTypeDictionary:
#databaseCursor.execute("""ALTER TABLE %s rename column message to processor_notes""" % tableName)
#if 'started_datetime' not in columnNameTypeDictionary:
#databaseCursor.execute("""ALTER TABLE %s rename column starteddatetime to started_datetime""" % tableName)
#if 'completed_datetime' not in columnNameTypeDictionary:
#databaseCursor.execute("""ALTER TABLE %s rename column completeddatetime to completed_datetime""" % tableName)
#-----------------------------------------------------------------------------------------------------------------
def updateDefinition(self, databaseCursor):
databaseCursor.execute("""DROP RULE IF EXISTS rule_reports_partition ON reports;""")
self.updateColumnDefinitions(databaseCursor)
indexesList = socorro_pg.indexesForTable(self.name, databaseCursor)
#if 'reports_pkey' in indexesList:
#databaseCursor.execute("""ALTER TABLE reports DROP CONSTRAINT reports_pkey CASCADE;""")
#if 'idx_reports_date' in indexesList:
#databaseCursor.execute("""DROP INDEX idx_reports_date;""")
#if 'ix_reports_signature' in indexesList:
#databaseCursor.execute("""DROP INDEX ix_reports_signature;""")
#if 'ix_reports_url' in indexesList:
#databaseCursor.execute("""DROP INDEX ix_reports_url;""")
#if 'ix_reports_uuid' in indexesList:
#databaseCursor.execute("""DROP INDEX ix_reports_uuid;""")
#triggersList = socorro_pg.triggersForTable(self.name, databaseCursor)
#if 'reports_insert_trigger' not in triggersList:
#databaseCursor.execute("""CREATE TRIGGER reports_insert_trigger
#BEFORE INSERT ON reports
#FOR EACH ROW EXECUTE PROCEDURE partition_insert_trigger();""")
databaseDependenciesForSetup[ReportsTable] = []
#=================================================================================================================
class DumpsTable(PartitionedTable):
"""Define the table 'dumps'"""
#-----------------------------------------------------------------------------------------------------------------
def __init__ (self, logger, **kwargs):
super(DumpsTable, self).__init__(name='dumps', logger=logger,
creationSql="""
CREATE TABLE dumps (
report_id integer NOT NULL PRIMARY KEY,
date_processed timestamp without time zone,
data text
);
--CREATE TRIGGER dumps_insert_trigger
-- BEFORE INSERT ON dumps
-- FOR EACH ROW EXECUTE PROCEDURE partition_insert_trigger();""",
partitionCreationSqlTemplate="""
CREATE TABLE %(partitionName)s (
CONSTRAINT %(partitionName)s_date_check CHECK (TIMESTAMP without time zone '%(startDate)s' <= date_processed and date_processed < TIMESTAMP without time zone '%(endDate)s')
)
INHERITS (dumps);
CREATE INDEX %(partitionName)s_report_id_date_key ON %(partitionName)s (report_id, date_processed);
ALTER TABLE %(partitionName)s
ADD CONSTRAINT %(partitionName)s_report_id_fkey FOREIGN KEY (report_id) REFERENCES reports_%(compressedStartDate)s(id) ON DELETE CASCADE;
""")
self.insertSql = """insert into TABLENAME (report_id, date_processed, data) values (%s, %s, %s)"""
#-----------------------------------------------------------------------------------------------------------------
def alterColumnDefinitions(self, databaseCursor, tableName):
columnNameTypeDictionary = socorro_pg.columnNameTypeDictionaryForTable(tableName, databaseCursor)
#if 'date_processed' not in columnNameTypeDictionary:
#databaseCursor.execute("""ALTER TABLE %s
#ADD COLUMN date_processed TIMESTAMP without time zone;""" % tableName)
#-----------------------------------------------------------------------------------------------------------------
def updateDefinition(self, databaseCursor):
self.updateColumnDefinitions(databaseCursor)
indexesList = socorro_pg.indexesForTable(self.name, databaseCursor)
#if 'dumps_pkey' in indexesList:
#databaseCursor.execute("""ALTER TABLE dumps
#DROP CONSTRAINT dumps_pkey;""")
#databaseCursor.execute("""DROP RULE IF EXISTS rule_dumps_partition ON dumps;""")
#triggersList = socorro_pg.triggersForTable(self.name, databaseCursor)
#if 'dumps_insert_trigger' not in triggersList:
#databaseCursor.execute("""CREATE TRIGGER dumps_insert_trigger
#BEFORE INSERT ON dumps
#FOR EACH ROW EXECUTE PROCEDURE partition_insert_trigger();""")
#-----------------------------------------------------------------------------------------------------------------
def partitionCreationParameters(self, uniqueIdentifier):
startDate, endDate = uniqueIdentifier
startDateAsString = "%4d-%02d-%02d" % startDate.timetuple()[:3]
compressedStartDateAsString = startDateAsString.replace("-", "")
endDateAsString = "%4d-%02d-%02d" % endDate.timetuple()[:3]
return { "partitionName": "dumps_%s" % compressedStartDateAsString,
"startDate": startDateAsString,
"endDate": endDateAsString,
"compressedStartDate": compressedStartDateAsString
}
databaseDependenciesForSetup[DumpsTable] = []
databaseDependenciesForPartition[DumpsTable] = [ReportsTable]
#=================================================================================================================
class ExtensionsTable(PartitionedTable):
"""Define the table 'extensions'"""
#-----------------------------------------------------------------------------------------------------------------
def __init__ (self, logger, **kwargs):
super(ExtensionsTable, self).__init__(name='extensions', logger=logger,
creationSql="""
CREATE TABLE extensions (
report_id integer NOT NULL,
date_processed timestamp without time zone,
extension_key integer NOT NULL,
extension_id character varying(100) NOT NULL,
extension_version character varying(16)
);
--CREATE TRIGGER extensions_insert_trigger
-- BEFORE INSERT ON extensions
-- FOR EACH ROW EXECUTE PROCEDURE partition_insert_trigger();""",
partitionCreationSqlTemplate="""
CREATE TABLE %(partitionName)s (
CONSTRAINT %(partitionName)s_date_check CHECK (TIMESTAMP without time zone '%(startDate)s' <= date_processed and date_processed < TIMESTAMP without time zone '%(endDate)s'),
PRIMARY KEY (report_id)
)
INHERITS (extensions);
CREATE INDEX %(partitionName)s_report_id_date_key ON %(partitionName)s (report_id, date_processed);
ALTER TABLE %(partitionName)s
ADD CONSTRAINT %(partitionName)s_report_id_fkey FOREIGN KEY (report_id) REFERENCES reports_%(compressedStartDate)s(id) ON DELETE CASCADE;
""")
self.insertSql = """insert into TABLENAME (report_id, date_processed, extension_key, extension_id, extension_version) values (%s, %s, %s, %s, %s)"""
#-----------------------------------------------------------------------------------------------------------------
def alterColumnDefinitions(self, databaseCursor, tableName):
columnNameTypeDictionary = socorro_pg.columnNameTypeDictionaryForTable(tableName, databaseCursor)
#if 'date_processed' not in columnNameTypeDictionary:
#databaseCursor.execute("""ALTER TABLE %s
#ADD COLUMN date_processed TIMESTAMP without time zone;""" % tableName)
#-----------------------------------------------------------------------------------------------------------------
def updateDefinition(self, databaseCursor):
self.updateColumnDefinitions(databaseCursor)
indexesList = socorro_pg.indexesForTable(self.name, databaseCursor)
#if 'extensions_pkey' in indexesList:
#databaseCursor.execute("""ALTER TABLE extensions
#DROP CONSTRAINT extensions_pkey;""")
#databaseCursor.execute("""DROP RULE IF EXISTS rule_extensions_partition ON extensions;""")
#triggersList = socorro_pg.triggersForTable(self.name, databaseCursor)
#if 'extensions_insert_trigger' not in triggersList:
#databaseCursor.execute("""CREATE TRIGGER extensions_insert_trigger
#BEFORE INSERT ON extensions
#FOR EACH ROW EXECUTE PROCEDURE partition_insert_trigger();""")
#-----------------------------------------------------------------------------------------------------------------
def partitionCreationParameters(self, uniqueIdentifier):
startDate, endDate = uniqueIdentifier
startDateAsString = "%4d-%02d-%02d" % startDate.timetuple()[:3]
compressedStartDateAsString = startDateAsString.replace("-", "")
endDateAsString = "%4d-%02d-%02d" % endDate.timetuple()[:3]
return { "partitionName": "extensions_%s" % compressedStartDateAsString,
"startDate": startDateAsString,
"endDate": endDateAsString,
"compressedStartDate": compressedStartDateAsString
}
databaseDependenciesForPartition[ExtensionsTable] = [ReportsTable]
databaseDependenciesForSetup[ExtensionsTable] = []
#=================================================================================================================
class FramesTable(PartitionedTable):
"""Define the table 'frames'"""
#-----------------------------------------------------------------------------------------------------------------
def __init__ (self, logger, **kwargs):
super(FramesTable, self).__init__(name='frames', logger=logger,
creationSql="""
CREATE TABLE frames (
report_id integer NOT NULL,
date_processed timestamp without time zone,
frame_num integer NOT NULL,
signature varchar(255)
);
--CREATE TRIGGER frames_insert_trigger
-- BEFORE INSERT ON frames
-- FOR EACH ROW EXECUTE PROCEDURE partition_insert_trigger();""",
partitionCreationSqlTemplate="""
CREATE TABLE %(partitionName)s (
CONSTRAINT %(partitionName)s_date_check CHECK (TIMESTAMP without time zone '%(startDate)s' <= date_processed and date_processed < TIMESTAMP without time zone '%(endDate)s'),
PRIMARY KEY (report_id, frame_num)
)
INHERITS (frames);
CREATE INDEX %(partitionName)s_report_id_date_key ON %(partitionName)s (report_id, date_processed);
ALTER TABLE %(partitionName)s
ADD CONSTRAINT %(partitionName)s_report_id_fkey FOREIGN KEY (report_id) REFERENCES reports_%(compressedStartDate)s(id) ON DELETE CASCADE;
"""
)
self.insertSql = """insert into TABLENAME (report_id, frame_num, date_processed, signature) values (%s, %s, %s, %s)"""
#-----------------------------------------------------------------------------------------------------------------
def alterColumnDefinitions(self, databaseCursor, tableName):
columnNameTypeDictionary = socorro_pg.columnNameTypeDictionaryForTable(tableName, databaseCursor)
#if 'date_processed' not in columnNameTypeDictionary:
#databaseCursor.execute("""ALTER TABLE %s
#ADD COLUMN date_processed TIMESTAMP without time zone;""" % tableName)
#-----------------------------------------------------------------------------------------------------------------
def updateDefinition(self, databaseCursor):
self.updateColumnDefinitions(databaseCursor)
indexesList = socorro_pg.indexesForTable(self.name, databaseCursor)
#if 'frames_pkey' in indexesList:
#databaseCursor.execute("""ALTER TABLE frames
#DROP CONSTRAINT frames_pkey;""")
#databaseCursor.execute("""DROP RULE IF EXISTS rule_frames_partition ON frames;""")
#triggersList = socorro_pg.triggersForTable(self.name, databaseCursor)
#if 'frames_insert_trigger' not in triggersList:
#databaseCursor.execute("""CREATE TRIGGER frames_insert_trigger
#BEFORE INSERT ON frames
#FOR EACH ROW EXECUTE PROCEDURE partition_insert_trigger();""")
#-----------------------------------------------------------------------------------------------------------------
def partitionCreationParameters(self, uniqueIdentifier):
startDate, endDate = uniqueIdentifier
startDateAsString = "%4d-%02d-%02d" % startDate.timetuple()[:3]
compressedStartDateAsString = startDateAsString.replace("-", "")
endDateAsString = "%4d-%02d-%02d" % endDate.timetuple()[:3]
return { "partitionName": "frames_%s" % compressedStartDateAsString,
"startDate": startDateAsString,
"endDate": endDateAsString,
"compressedStartDate": compressedStartDateAsString
}
databaseDependenciesForPartition[FramesTable] = [ReportsTable]
databaseDependenciesForSetup[FramesTable] = []
#=================================================================================================================
class PriorityJobsTable(Table):
"""Define the table 'priorityjobs'"""
#-----------------------------------------------------------------------------------------------------------------
def __init__ (self, name="priorityjobs", logger=None, **kwargs):
super(PriorityJobsTable, self).__init__(name=name, logger=logger,
creationSql = """
CREATE TABLE %s (
uuid varchar(255) NOT NULL PRIMARY KEY
);""" % name)
databaseDependenciesForSetup[PriorityJobsTable] = []
#=================================================================================================================
class ProcessorsTable(Table):
"""Define the table 'processors'"""
#-----------------------------------------------------------------------------------------------------------------
def __init__ (self, logger, **kwargs):
super(ProcessorsTable, self).__init__(name = "processors", logger=logger,
creationSql = """
CREATE TABLE processors (
id serial NOT NULL PRIMARY KEY,
name varchar(255) NOT NULL UNIQUE,
startdatetime timestamp without time zone NOT NULL,
lastseendatetime timestamp without time zone
);""")
def updateDefinition(self, databaseCursor):
indexesList = socorro_pg.indexesForTable(self.name, databaseCursor)
#if 'idx_processor_name' in indexesList:
#databaseCursor.execute("""DROP INDEX idx_processor_name;
#ALTER TABLE processors ADD CONSTRAINT processors_name_key UNIQUE (name);""")
databaseDependenciesForSetup[ProcessorsTable] = []
#=================================================================================================================
class JobsTable(Table):
"""Define the table 'jobs'"""
#-----------------------------------------------------------------------------------------------------------------
def __init__ (self, logger, **kwargs):
super(JobsTable, self).__init__(name = "jobs", logger=logger,
creationSql = """
CREATE TABLE jobs (
id serial NOT NULL PRIMARY KEY,
pathname character varying(1024) NOT NULL,
uuid varchar(50) NOT NULL UNIQUE,
owner integer,
priority integer DEFAULT 0,
queueddatetime timestamp without time zone,
starteddatetime timestamp without time zone,
completeddatetime timestamp without time zone,
success boolean,
message text,
FOREIGN KEY (owner) REFERENCES processors (id)
);
CREATE INDEX jobs_owner_key ON jobs (owner);
CREATE INDEX jobs_owner_starteddatetime_key ON jobs (owner, starteddatetime);
CREATE INDEX jobs_owner_starteddatetime_priority_key ON jobs (owner, starteddatetime, priority DESC);
CREATE INDEX jobs_completeddatetime_queueddatetime_key ON jobs (completeddatetime, queueddatetime);
--CREATE INDEX jobs_priority_key ON jobs (priority);
""")
#-----------------------------------------------------------------------------------------------------------------
def updateDefinition(self, databaseCursor):
indexesList = socorro_pg.indexesForTable(self.name, databaseCursor)
if 'idx_owner' in indexesList:
databaseCursor.execute("""
DROP INDEX idx_owner;
CREATE INDEX jobs_owner_key ON jobs (owner);""")
if 'idx_queueddatetime' in indexesList:
databaseCursor.execute("""
DROP INDEX idx_queueddatetime;""")
if 'idx_starteddatetime' in indexesList:
databaseCursor.execute("""
DROP INDEX idx_starteddatetime;""")
if 'jobs_priority_queueddatetime' in indexesList:
databaseCursor.execute("""
DROP INDEX jobs_priority_queueddatetime;""")
if 'jobs_owner_starteddatetime' in indexesList:
databaseCursor.execute("""
DROP INDEX jobs_owner_starteddatetime;
CREATE INDEX jobs_owner_starteddatetime_key ON jobs (owner, starteddatetime);""")
#if 'jobs_priority_key' not in indexesList:
# databaseCursor.execute("""CREATE INDEX jobs_priority_key ON jobs (priority);""")
if 'jobs_owner_starteddatetime_priority_key' not in indexesList:
databaseCursor.execute("""CREATE INDEX jobs_owner_starteddatetime_priority_key ON jobs (owner, starteddatetime, priority DESC);""")
if 'jobs_completeddatetime_queueddatetime_key' not in indexesList:
databaseCursor.execute("""CREATE INDEX jobs_completeddatetime_queueddatetime_key ON jobs (completeddatetime, queueddatetime);""")
if 'jobs_success_key' not in indexesList:
databaseCursor.execute("""CREATE INDEX jobs_success_key ON jobs (success);""")
databaseDependenciesForSetup[JobsTable] = [ProcessorsTable]
#=================================================================================================================
class ServerStatusTable(Table):
"""Define the table 'server_status'"""
#-----------------------------------------------------------------------------------------------------------------
def __init__ (self, logger, **kwargs):
super(ServerStatusTable, self).__init__(name='server_status', logger=logger,
creationSql="""
CREATE TABLE server_status (
id serial NOT NULL,
date_recently_completed timestamp without time zone,
date_oldest_job_queued timestamp without time zone,
avg_process_sec real,
avg_wait_sec real,
waiting_job_count integer NOT NULL,
processors_count integer NOT NULL,
date_created timestamp without time zone NOT NULL
);
ALTER TABLE ONLY server_status
ADD CONSTRAINT server_status_pkey PRIMARY KEY (id);
CREATE INDEX idx_server_status_date ON server_status USING btree (date_created, id);
""")
databaseDependenciesForSetup[ServerStatusTable] = []
#=================================================================================================================
class SignatureDimsTable(Table):
"""Define the table 'signaturedims'"""
#-----------------------------------------------------------------------------------------------------------------
def __init__ (self, logger, **kwargs):
super(Table, self).__init__(name='signaturedims', logger=logger,
creationSql="""
CREATE TABLE signaturedims (
id serial NOT NULL,
signature character varying(255) NOT NULL);
ALTER TABLE ONLY signaturedims
ADD CONSTRAINT signaturedims_pkey PRIMARY KEY (id);
CREATE UNIQUE INDEX signaturedims_signature_key ON signaturedims USING btree (signature);
""")
databaseDependenciesForSetup[SignatureDimsTable] = []
#=================================================================================================================
class ProductDimsTable(Table):
"""Define the table 'productdims'"""
#-----------------------------------------------------------------------------------------------------------------
def __init__ (self, logger, **kwargs):
super(Table, self).__init__(name='productdims', logger=logger,
creationSql="""
CREATE TABLE productdims (
id serial NOT NULL,
product character varying(30) NOT NULL,
version character varying(16) NOT NULL,
os_name character varying(100),
release character varying(50) NOT NULL);
ALTER TABLE ONLY productdims
ADD CONSTRAINT productdims_pkey PRIMARY KEY (id);
CREATE INDEX productdims_product_version_key ON productdims USING btree (product, version);
CREATE UNIQUE INDEX productdims_product_version_os_name_release_key ON productdims USING btree (product, version, release, os_name);
""")
databaseDependenciesForSetup[ProductDimsTable] = []
#=================================================================================================================
class MTBFFactsTable(Table):
"""Define the table 'mtbffacts'"""
#-----------------------------------------------------------------------------------------------------------------
def __init__ (self, logger, **kwargs):
super(Table, self).__init__(name='mtbffacts', logger=logger,
creationSql="""
CREATE TABLE mtbffacts (
id serial NOT NULL,
avg_seconds integer NOT NULL,
report_count integer NOT NULL,
unique_users integer NOT NULL,
day date,
productdims_id integer);
CREATE INDEX mtbffacts_day_key ON mtbffacts USING btree (day);
CREATE INDEX mtbffacts_product_id_key ON mtbffacts USING btree (productdims_id);
ALTER TABLE ONLY mtbffacts
ADD CONSTRAINT mtbffacts_pkey PRIMARY KEY (id);
ALTER TABLE ONLY mtbffacts
ADD CONSTRAINT mtbffacts_productdims_id_fkey FOREIGN KEY (productdims_id) REFERENCES productdims(id);
""")
databaseDependenciesForSetup[MTBFFactsTable] = [ProductDimsTable]
#=================================================================================================================
class MTBFConfigTable(Table):
"""Define the table 'mtbfconfig'"""
#-----------------------------------------------------------------------------------------------------------------
def __init__ (self, logger, **kwargs):
super(Table, self).__init__(name='mtbfconfig', logger=logger,
creationSql="""
CREATE TABLE mtbfconfig (
id serial NOT NULL,
productdims_id integer,
start_dt date,
end_dt date);
ALTER TABLE ONLY mtbfconfig
ADD CONSTRAINT mtbfconfig_pkey PRIMARY KEY (id);
CREATE INDEX mtbfconfig_end_dt_key ON mtbfconfig USING btree (end_dt);
CREATE INDEX mtbfconfig_start_dt_key ON mtbfconfig USING btree (start_dt);
ALTER TABLE ONLY mtbfconfig
ADD CONSTRAINT mtbfconfig_productdims_id_fkey FOREIGN KEY (productdims_id) REFERENCES productdims(id);
""")
databaseDependenciesForSetup[MTBFConfigTable] = [ProductDimsTable]
#=================================================================================================================
class TCByUrlConfigTable(Table):
"""Define the table 'tcbyurlconfig'"""
#-----------------------------------------------------------------------------------------------------------------
def __init__ (self, logger, **kwargs):
super(Table, self).__init__(name='tcbyurlconfig', logger=logger,
creationSql="""
CREATE TABLE tcbyurlconfig (
id serial NOT NULL,
productdims_id integer,
enabled boolean);
ALTER TABLE ONLY tcbyurlconfig
ADD CONSTRAINT tcbyurlconfig_pkey PRIMARY KEY (id);
ALTER TABLE ONLY tcbyurlconfig
ADD CONSTRAINT tcbyurlconfig_productdims_id_fkey FOREIGN KEY (productdims_id) REFERENCES productdims(id);
""")
databaseDependenciesForSetup[TCByUrlConfigTable] = [ProductDimsTable]
#=================================================================================================================
class UrlDimsTable(Table):
"""Define the table 'urldims'"""
#-----------------------------------------------------------------------------------------------------------------
def __init__ (self, logger, **kwargs):
super(Table, self).__init__(name='urldims', logger=logger,
creationSql="""
CREATE TABLE urldims (
id serial NOT NULL,
domain character varying(255) NOT NULL,
url character varying(255) NOT NULL);
ALTER TABLE ONLY urldims
ADD CONSTRAINT urldims_pkey PRIMARY KEY (id);
CREATE UNIQUE INDEX urldims_url_domain_key ON urldims USING btree (url, domain);
""")
databaseDependenciesForSetup[UrlDimsTable] = []
#=================================================================================================================
class TopCrashUrlFactsTable(Table):
"""Define the table 'topcrashurlfacts'"""
#-----------------------------------------------------------------------------------------------------------------
def __init__ (self, logger, **kwargs):
super(Table, self).__init__(name='topcrashurlfacts', logger=logger,
creationSql="""
CREATE TABLE topcrashurlfacts (
id serial NOT NULL,
count integer NOT NULL,
rank integer,
day date NOT NULL,
productdims_id integer,
urldims_id integer,
signaturedims_id integer
);
ALTER TABLE ONLY topcrashurlfacts
ADD CONSTRAINT topcrashurlfacts_pkey PRIMARY KEY (id);
CREATE INDEX topcrashurlfacts_count_key ON topcrashurlfacts USING btree (count);
CREATE INDEX topcrashurlfacts_day_key ON topcrashurlfacts USING btree (day);
CREATE INDEX topcrashurlfacts_productdims_key ON topcrashurlfacts USING btree (productdims_id);
CREATE INDEX topcrashurlfacts_signaturedims_key ON topcrashurlfacts USING btree (signaturedims_id);
CREATE INDEX topcrashurlfacts_urldims_key ON topcrashurlfacts USING btree (urldims_id);
ALTER TABLE ONLY topcrashurlfacts
ADD CONSTRAINT topcrashurlfacts_productdims_id_fkey FOREIGN KEY (productdims_id) REFERENCES productdims(id);
ALTER TABLE ONLY topcrashurlfacts
ADD CONSTRAINT topcrashurlfacts_signaturedims_id_fkey FOREIGN KEY (signaturedims_id) REFERENCES signaturedims(id);
ALTER TABLE ONLY topcrashurlfacts
ADD CONSTRAINT topcrashurlfacts_urldims_id_fkey FOREIGN KEY (urldims_id) REFERENCES urldims(id);
""")
databaseDependenciesForSetup[TopCrashUrlFactsTable] = [ProductDimsTable,SignatureDimsTable,UrlDimsTable]
#=================================================================================================================
class TopCrashUrlFactsReportsTable(Table):
"""Define the table 'topcrashurlfactsreports'"""
#-----------------------------------------------------------------------------------------------------------------
def __init__ (self, logger, **kwargs):
super(Table, self).__init__(name='topcrashurlfactsreports', logger=logger,
creationSql="""
CREATE TABLE topcrashurlfactsreports (
id serial NOT NULL,
uuid character varying(50) NOT NULL,
comments character varying(500),
topcrashurlfacts_id integer);
ALTER TABLE ONLY topcrashurlfactsreports
ADD CONSTRAINT topcrashurlfactsreports_pkey PRIMARY KEY (id);
CREATE INDEX topcrashurlfactsreports_topcrashurlfacts_id_key ON topcrashurlfactsreports USING btree (topcrashurlfacts_id);
ALTER TABLE ONLY topcrashurlfactsreports
ADD CONSTRAINT topcrashurlfactsreports_topcrashurlfacts_id_fkey FOREIGN KEY (topcrashurlfacts_id) REFERENCES topcrashurlfacts(id) ON DELETE CASCADE;
""")
databaseDependenciesForSetup[TopCrashUrlFactsReportsTable] = [TopCrashUrlFactsTable]
#=================================================================================================================
class TopCrashersTable(Table):
"""Define the table 'topcrashers'"""
#-----------------------------------------------------------------------------------------------------------------
def __init__ (self, logger, **kwargs):
super(TopCrashersTable, self).__init__(name='topcrashers', logger=logger,
creationSql="""
CREATE TABLE topcrashers (
id serial NOT NULL,
signature character varying(255) NOT NULL,
version character varying(30) NOT NULL,
product character varying(30) NOT NULL,
build character varying(30) NOT NULL,
total integer,
win integer,
mac integer,
linux integer,
rank integer,
last_rank integer,
trend character varying(30),
uptime real,
users integer,
last_updated timestamp without time zone
);
ALTER TABLE ONLY topcrashers
ADD CONSTRAINT topcrashers_pkey PRIMARY KEY (id);
""")
databaseDependenciesForSetup[TopCrashersTable] = []
#=================================================================================================================
#class ParititioningTriggerScript(DatabaseObject):
##-----------------------------------------------------------------------------------------------------------------
#def __init__ (self, logger):
#super(ParititioningTriggerScript, self).__init__(name = "partition_insert_trigger", logger=logger,
#creationSql = """
#CREATE OR REPLACE FUNCTION partition_insert_trigger()
#RETURNS TRIGGER AS $$
#import socorro.database.server as ds
#try:
#targetTableName = ds.targetTableName(TD["table_name"], TD['new']['date_processed'])
##plpy.info(targetTableName)
#planName = ds.targetTableInsertPlanName (targetTableName)
##plpy.info("using plan: %s" % planName)
#values = ds.getValuesList(TD, SD, plpy)
##plpy.info(str(values))
##plpy.info('about to execute plan')
#result = plpy.execute(SD[planName], values)
#return None
#except KeyError: #no plan
##plpy.info("oops no plan for: %s" % planName)
#SD[planName] = ds.createNewInsertQueryPlan(TD, SD, targetTableName, planName, plpy)
##plpy.info('about to execute plan for second time')
#result = plpy.execute(SD[planName], values)
#return None
#$$
#LANGUAGE plpythonu;""")
#def updateDefinition(self, databaseCursor):
#databaseCursor.execute(self.creationSql)
#=================================================================================================================
#class ChattyParititioningTriggerScript(DatabaseObject):
#-----------------------------------------------------------------------------------------------------------------
#def __init__ (self, logger):
#super(ChattyParititioningTriggerScript, self).__init__(name = "partition_insert_trigger", logger=logger,
#creationSql = """
#CREATE OR REPLACE FUNCTION partition_insert_trigger()
#RETURNS TRIGGER AS $$
#import socorro.database.server as ds
#import logging
#import logging.handlers
#try:
#targetTableName = ds.targetTableName(TD["table_name"], TD['new']['date_processed'])
#planName = ds.targetTableInsertPlanName (targetTableName)
#try:
#logger = SD["logger"]
#except KeyError:
#SD["logger"] = logger = logging.getLogger(targetTableName)
#logger.setLevel(logging.DEBUG)
#rotatingFileLog = logging.handlers.RotatingFileHandler("/tmp/partitionTrigger.log", "a", 100000000, 10)
#rotatingFileLog.setLevel(logging.DEBUG)
#rotatingFileLogFormatter = logging.Formatter('%(asctime)s %(levelname)s - %(message)s')
#rotatingFileLog.setFormatter(rotatingFileLogFormatter)
#logger.addHandler(rotatingFileLog)
#logger.debug("---------- beginning new session ----------")
#SD["counter"] = 0
#values = ds.getValuesList(TD, SD, plpy)
#logger.debug("%08d plan: %s", SD["counter"], planName)
#SD["counter"] += 1
#result = plpy.execute(SD[planName], values)
#return 'SKIP'
#except KeyError: #no plan
#logger.debug('creating new plan for: %s', planName)
#SD[planName] = ds.createNewInsertQueryPlan(TD, SD, targetTableName, planName, plpy)
#result = plpy.execute(SD[planName], values)
#return 'SKIP'
#$$
#LANGUAGE plpythonu;""")
##-----------------------------------------------------------------------------------------------------------------
#def updateDefinition(self, databaseCursor):
#databaseCursor.execute(self.creationSql)
#-----------------------------------------------------------------------------------------------------------------
def connectToDatabase(config, logger):
databaseDSN = "host=%(databaseHost)s dbname=%(databaseName)s user=%(databaseUserName)s password=%(databasePassword)s" % config
databaseConnection = pg.connect(databaseDSN)
databaseCursor = databaseConnection.cursor(cursor_factory=socorro_psy.LoggingCursor)
databaseCursor.setLogger(logger)
return (databaseConnection, databaseCursor)
#-----------------------------------------------------------------------------------------------------------------
def setupDatabase(config, logger):
databaseConnection, databaseCursor = connectToDatabase(config, logger)
try:
aList = getOrderedSetupList()
for aDatabaseObjectClass in getOrderedSetupList():
aDatabaseObject = aDatabaseObjectClass(logger=logger)
aDatabaseObject._createSelf(databaseCursor)
databaseConnection.commit()
except:
databaseConnection.rollback()
socorro_util.reportExceptionAndAbort(logger)
#-----------------------------------------------------------------------------------------------------------------
def teardownDatabase(config,logger):
databaseConnection,databaseCursor = connectToDatabase(config,logger)
try:
for databaseObjectClass in getOrderedSetupList():
aDatabaseObject = databaseObjectClass(logger=logger)
aDatabaseObject.drop(databaseCursor)
databaseConnection.commit()
except:
databaseConnection.rollback()
socorro_util.reportExceptionAndContinue(logger)
#-----------------------------------------------------------------------------------------------------------------
databaseObjectClassListForUpdate = [ReportsTable,
DumpsTable,
ExtensionsTable,
FramesTable,
ProcessorsTable,
JobsTable,
]
#-----------------------------------------------------------------------------------------------------------------
def updateDatabase(config, logger):
databaseConnection, databaseCursor = connectToDatabase(config, logger)
try:
#try:
#databaseCursor.execute("CREATE LANGUAGE plpythonu")
#except:
#databaseConnection.rollback()
for aDatabaseObjectClass in databaseObjectClassListForUpdate:
aDatabaseObject = aDatabaseObjectClass(logger=logger)
aDatabaseObject.updateDefinition(databaseCursor)
databaseConnection.commit()
except:
databaseConnection.rollback()
socorro_util.reportExceptionAndAbort(logger)
#-----------------------------------------------------------------------------------------------------------------
# list all the tables that should have weekly partitions pre-created. This is a subset of all the PartitionedTables
# since it may be that some PartitionedTables should not be pre-created.
databaseObjectClassListForWeeklyPartitions = [ReportsTable,
DumpsTable,
FramesTable,
ExtensionsTable,
]
#-----------------------------------------------------------------------------------------------------------------
def createPartitions(config, logger):
"""
Create a set of partitions for all the tables known to benefit from having their partitions created prior to being needed.
See the list databaseObjectClassListForWeeklyPartitions above.
"""
databaseConnection, databaseCursor = connectToDatabase(config, logger)
try:
for aDatabaseObjectClass in databaseObjectClassListForWeeklyPartitions:
weekIterator = mondayPairsIteratorFactory(config.startDate, config.endDate)
aDatabaseObject = aDatabaseObjectClass(logger=logger)
aDatabaseObject.createPartitions(databaseCursor, weekIterator)
databaseConnection.commit()
except:
databaseConnection.rollback()
socorro_util.reportExceptionAndAbort(logger)
| gpl-2.0 | -3,316,191,166,787,014,000 | 65.762712 | 236 | 0.498124 | false |
ntthuy11/CodeFights | Arcade/04_Python/01_MeetPython/mexFunction.py | 1 | 1362 | # You've just started to study impartial games, and came across an interesting theory. The theory is quite complicated, but
# it can be narrowed down to the following statements: solutions to all such games can be found with the mex function.
# Mex is an abbreviation of minimum excludant: for the given set s it finds the minimum non-negative integer that is not
# present in s.
# You don't yet know how to implement such a function efficiently, so you would like to create a simplified version. For the
# given set s and given an upperBound, implement a function that will find its mex if it's smaller than upperBound or
# return upperBound instead.
#
# Example
# For s = [0, 4, 2, 3, 1, 7] and upperBound = 10,
# the output should be
# mexFunction(s, upperBound) = 5.
# 5 is the smallest non-negative integer that is not present in s, and it is smaller than upperBound.
#
# For s = [0, 4, 2, 3, 1, 7] and upperBound = 3,
# the output should be
# mexFunction(s, upperBound) = 3.
# The minimum excludant for the given set is 5, but it's greater than upperBound, so the output should be 3.
def mexFunction(s, upperBound):
found = -1
for i in range(upperBound):
if not i in s:
found = i
break
else:
found = upperBound # this line is what CodeFights asks for
return found
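# Illustrative checks (values taken from the problem statement above):
#   mexFunction([0, 4, 2, 3, 1, 7], 10)  # -> 5, the smallest missing non-negative integer
#   mexFunction([0, 4, 2, 3, 1, 7], 3)   # -> 3, since the true mex (5) exceeds upperBound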
| mit | -4,172,678,401,009,438,700 | 45.965517 | 124 | 0.690896 | false |
flower-pot/xf-indicator | xf_indicator/build_status.py | 1 | 2244 | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2014 Frederic Branczyk [email protected]
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
### END LICENSE
from gi.repository import Gtk, GObject
from enum import Enum
class BuildStatus(Enum):
active = (1)
failing = (2)
not_existing = (3)
unknown = (4)
passing = (5)
def __init__(self, number):
self._value_ = number
def __ge__(self, other):
if self.__class__ is other.__class__:
return self.value >= other.value
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self.value > other.value
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self.value <= other.value
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.value < other.value
return NotImplemented
def __eq__(self, other):
if self.__class__ is other.__class__:
return self.value == other.value
return NotImplemented
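# Brief illustration (not part of the original module): the rich comparisons above
# order statuses by their numeric value, so for example
#   BuildStatus.failing < BuildStatus.passing       # True  (2 < 5)
#   max([BuildStatus.active, BuildStatus.unknown])  # BuildStatus.unknown (4 > 1)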
| mit | 7,255,914,806,074,165,000 | 37.033898 | 79 | 0.666667 | false |
rasmusprentow/mvln | mvln/test/test_converter.py | 1 | 1427 | #converter_test.py
import sys
sys.path.insert(0,"..")
sys.path.insert(0,"mvln")
from mvln import *
import unittest, os, shutil
testfolder = os.getcwd()+ "/__tmptest__/src/testfolder/"
testfolder_dest = os.getcwd()+"/__tmptest__/dst/testfolder/"
testfolder2 = os.getcwd()+ "/__tmptest__/src/testfolder2/"
testfolder2_dest = os.getcwd()+"/__tmptest__/dst/testfolder2/"
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
try:
shutil.rmtree("__tmptest__")
except OSError:
pass
os.mkdir("__tmptest__")
os.mkdir("__tmptest__/dst")
os.mkdir("__tmptest__/src")
os.mkdir(testfolder)
os.mkdir(testfolder2)
f = open(testfolder+"testfile",'w')
f.write("testestest")
f.close()
f = open(testfolder2+"testfile",'w')
f.write("testestest")
f.close()
self.converter = Converter( testfolder + " " + testfolder_dest + "\n" +
testfolder2 + " " + testfolder2_dest + "\n")
def test_getlines(self):
result = self.converter.getLines()
self.assertEqual(result[1], testfolder2 + " " + testfolder2_dest)
def test_convert(self):
result = self.converter.getFiles()
self.assertIsInstance(result[1], MvLnFile)
self.assertEqual(result[1].dst, testfolder2_dest)
self.assertEqual(result[1].src, testfolder2)
self.assertEqual(result[0].dst, testfolder_dest)
self.assertEqual(result[0].src, testfolder)
if __name__ == '__main__':
unittest.main() | gpl-2.0 | 1,918,018,458,159,021,300 | 22.8 | 75 | 0.662228 | false |
ddico/odoo | addons/project_timesheet_holidays/models/hr_holidays.py | 2 | 5160 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
class HolidaysType(models.Model):
_inherit = "hr.leave.type"
def _default_project_id(self):
company = self.company_id if self.company_id else self.env.company
return company.leave_timesheet_project_id.id
def _default_task_id(self):
company = self.company_id if self.company_id else self.env.company
return company.leave_timesheet_task_id.id
timesheet_generate = fields.Boolean('Generate Timesheet', default=True, help="If checked, when validating a time off, timesheet will be generated in the Vacation Project of the company.")
timesheet_project_id = fields.Many2one('project.project', string="Project", default=_default_project_id, domain="[('company_id', '=', company_id)]", help="The project will contain the timesheet generated when a time off is validated.")
timesheet_task_id = fields.Many2one('project.task', string="Task for timesheet", default=_default_task_id, domain="[('project_id', '=', timesheet_project_id), ('company_id', '=', company_id)]")
@api.onchange('timesheet_task_id')
def _onchange_timesheet_generate(self):
if self.timesheet_task_id or self.timesheet_project_id:
self.timesheet_generate = True
else:
self.timesheet_generate = False
@api.onchange('timesheet_project_id')
def _onchange_timesheet_project(self):
company = self.company_id if self.company_id else self.env.company
default_task_id = company.leave_timesheet_task_id
if default_task_id and default_task_id.project_id == self.timesheet_project_id:
self.timesheet_task_id = default_task_id
else:
self.timesheet_task_id = False
if self.timesheet_project_id:
self.timesheet_generate = True
else:
self.timesheet_generate = False
@api.constrains('timesheet_generate', 'timesheet_project_id', 'timesheet_task_id')
def _check_timesheet_generate(self):
for holiday_status in self:
if holiday_status.timesheet_generate:
if not holiday_status.timesheet_project_id or not holiday_status.timesheet_task_id:
raise ValidationError(_("Both the internal project and task are required to "
"generate a timesheet for the time off. If you don't want a timesheet, you should "
"leave the internal project and task empty."))
class Holidays(models.Model):
_inherit = "hr.leave"
timesheet_ids = fields.One2many('account.analytic.line', 'holiday_id', string="Analytic Lines")
def _validate_leave_request(self):
""" Timesheet will be generated on leave validation only if a timesheet_project_id and a
timesheet_task_id are set on the corresponding leave type. The generated timesheet will
be attached to this project/task.
"""
# create the timesheet on the vacation project
for holiday in self.filtered(
lambda request: request.holiday_type == 'employee' and
request.holiday_status_id.timesheet_project_id and
request.holiday_status_id.timesheet_task_id):
holiday._timesheet_create_lines()
return super(Holidays, self)._validate_leave_request()
def _timesheet_create_lines(self):
self.ensure_one()
vals_list = []
work_hours_data = self.employee_id.list_work_time_per_day(
self.date_from,
self.date_to,
)
for index, (day_date, work_hours_count) in enumerate(work_hours_data):
vals_list.append(self._timesheet_prepare_line_values(index, work_hours_data, day_date, work_hours_count))
timesheets = self.env['account.analytic.line'].sudo().create(vals_list)
return timesheets
def _timesheet_prepare_line_values(self, index, work_hours_data, day_date, work_hours_count):
self.ensure_one()
return {
'name': "%s (%s/%s)" % (self.holiday_status_id.name or '', index + 1, len(work_hours_data)),
'project_id': self.holiday_status_id.timesheet_project_id.id,
'task_id': self.holiday_status_id.timesheet_task_id.id,
'account_id': self.holiday_status_id.timesheet_project_id.analytic_account_id.id,
'unit_amount': work_hours_count,
'user_id': self.employee_id.user_id.id,
'date': day_date,
'holiday_id': self.id,
'employee_id': self.employee_id.id,
'company_id': self.holiday_status_id.timesheet_task_id.company_id.id or self.holiday_status_id.timesheet_project_id.company_id.id,
}
def action_refuse(self):
""" Remove the timesheets linked to the refused holidays """
result = super(Holidays, self).action_refuse()
timesheets = self.sudo().mapped('timesheet_ids')
timesheets.write({'holiday_id': False})
timesheets.unlink()
return result
| agpl-3.0 | 7,238,839,353,363,624,000 | 48.142857 | 239 | 0.645736 | false |
rice-apps/petition-app | main.py | 1 | 1292 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
from controllers import main, petitions, organizations, dashboards
from config import *
app = webapp2.WSGIApplication([
(PETITIONS_URI, petitions.PetitionsHandler),
(PETITIONS_SIGN_URI, petitions.SignHandler),
(PETITIONS_UNSIGN_URI, petitions.UnsignHandler),
(MY_URI, petitions.MyPageHandler),
(POSITIONS_POPULATE_URI, petitions.PositionsHandler),
(ORGANIZATION_URI, organizations.OrganizationsHandler),
(DASHBOARD_URI, dashboards.DashboardHandler),
(DASHBOARD_ADMIN_URI, dashboards.SaveAdminsHandler),
(DASHBOARD_ELECTIONS_URI, dashboards.ElectionHandler),
(ERROR_URI, main.ErrorHandler),
('/', main.MainHandler)
], debug=DEBUG)
| mit | -3,511,202,814,960,563,700 | 37 | 74 | 0.75387 | false |
MycroftAI/adapt | adapt/expander.py | 1 | 10830 | # Copyright 2018 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from six.moves import xrange
__author__ = 'seanfitz'
class SimpleGraph(object):
"""This class is to graph connected nodes
Note:
hash is a type that is hashable so independant values and tuples
but not objects, classes or lists.
"""
def __init__(self):
"""init an empty set"""
self.adjacency_lists = {}
def add_edge(self, a, b):
"""Used to add edges to the graph. 'a' and 'b' are vertexes and
if 'a' or 'b' doesn't exisit then the vertex is created
Args:
a (hash): is one vertex of the edge
b (hash): is another vertext of the edge
"""
neighbors_of_a = self.adjacency_lists.get(a)
if not neighbors_of_a:
neighbors_of_a = set()
self.adjacency_lists[a] = neighbors_of_a
neighbors_of_a.add(b)
neighbors_of_b = self.adjacency_lists.get(b)
if not neighbors_of_b:
neighbors_of_b = set()
self.adjacency_lists[b] = neighbors_of_b
neighbors_of_b.add(a)
def get_neighbors_of(self, a):
"""This will return the neighbors of the vertex
Args:
a (hash): is the vertex to get the neighbors for
Returns:
[] : a list of neighbors_of 'a'
Will return an empty set if 'a' doesn't exist or has no
neightbors.
"""
return self.adjacency_lists.get(a, set())
def vertex_set(self):
"""This returns a list of vertexes included in graph
Returns:
[] : a list of vertexes include in graph
"""
return list(self.adjacency_lists)
def bronk(r, p, x, graph):
"""This is used to fine cliques and remove them from graph
Args:
graph (graph): this is the graph of verticies to search for
cliques
p (list): this is a list of the verticies to search
r (list): used by bronk for the search
x (list): used by bronk for the search
Yields:
list : found clique of the given graph and verticies
"""
if len(p) == 0 and len(x) == 0:
yield r
return
for vertex in p[:]:
r_new = r[::]
r_new.append(vertex)
p_new = [val for val in p if val in graph.get_neighbors_of(vertex)] # p intersects N(vertex)
x_new = [val for val in x if val in graph.get_neighbors_of(vertex)] # x intersects N(vertex)
for result in bronk(r_new, p_new, x_new, graph):
yield result
p.remove(vertex)
x.append(vertex)
def get_cliques(vertices, graph):
"""get cliques
Args:
vertices (list) : list of the vertices to search for cliques
graph (graph) : a graph used to find the cliques among the vertices
Yields:
list: a clique from the graph
"""
for clique in bronk([], vertices, [], graph):
yield clique
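# Minimal sketch of how SimpleGraph and get_cliques fit together (illustrative only;
# the vertex names are assumptions):
#   g = SimpleGraph()
#   g.add_edge('a', 'b')
#   g.add_edge('b', 'c')
#   list(get_cliques(g.vertex_set(), g))  # -> [['a', 'b'], ['b', 'c']] (ordering may vary)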
def graph_key_from_tag(tag, entity_index):
"""Returns a key from a tag entity
Args:
tag (tag) : this is the tag selected to get the key from
entity_index (int) : this is the index of the tagged entity
Returns:
str : String representing the key for the given tagged entity.
"""
start_token = tag.get('start_token')
entity = tag.get('entities', [])[entity_index]
return str(start_token) + '-' + entity.get('key') + '-' + str(entity.get('confidence'))
class Lattice(object):
"""This manages a list of items or lists
Attributes:
nodes (list) : is a list of items or lists.
This is used to track items and lists that are a part of the
Lattice
"""
def __init__(self):
"""Creates the Lattice with an empty list"""
self.nodes = []
def append(self, data):
"""Appends items or lists to the Lattice
Args:
data (item,list) : The Item or List to be added to the Lattice
"""
if isinstance(data, list) and len(data) > 0:
self.nodes.append(data)
else:
self.nodes.append([data])
def traverse(self, index=0):
""" This is used to produce a list of lists where each each item
in that list is a diffrent combination of items from the lists
within with every combination of such values.
Args:
index (int) : the index at witch to start the list.
Note this is used only in the function as a processing
Returns:
list : is every combination.
"""
if index < len(self.nodes):
for entity in self.nodes[index]:
for next_result in self.traverse(index=index+1):
if isinstance(entity, list):
yield entity + next_result
else:
yield [entity] + next_result
else:
yield []
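# Illustrative behaviour (not in the original source): traverse() yields every
# combination that takes one element from each appended node, e.g.
#   lat = Lattice()
#   lat.append([1, 2])
#   lat.append([3])
#   list(lat.traverse())  # -> [[1, 3], [2, 3]]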
class BronKerboschExpander(object):
"""
BronKerboschExpander
Given a list of tagged entities (from the existing entity tagger implementation or another), expand out
valid parse results.
A parse result is considered valid if it contains no overlapping spans.
Since total confidence of a parse result is based on the sum of confidences of the entities, there is no sense
in yielding any potential parse results that are a subset/sequence of a larger valid parse result. By comparing
this concept to that of maximal cliques (https://en.wikipedia.org/wiki/Clique_problem), we can use well known
solutions to the maximal clique problem like the Bron/Kerbosch algorithm (https://en.wikipedia.org/wiki/Bron%E2%80%93Kerbosch_algorithm).
By considering tagged entities that do not overlap to be "neighbors", BronKerbosch will yield a set of maximal
cliques that are also valid parse results.
"""
def __init__(self, tokenizer):
self.tokenizer = tokenizer
def _build_graph(self, tags):
"""Builds a graph from the entities included in the tags.
Note this is used internally.
Args:
tags (list): A list of the tags to include in graph
Returns:
graph : this is the resulting graph of the tagged entities.
"""
graph = SimpleGraph()
for tag_index in xrange(len(tags)):
for entity_index in xrange(len(tags[tag_index].get('entities'))):
a_entity_name = graph_key_from_tag(tags[tag_index], entity_index)
tokens = self.tokenizer.tokenize(tags[tag_index].get('entities', [])[entity_index].get('match'))
for tag in tags[tag_index + 1:]:
start_token = tag.get('start_token')
if start_token >= tags[tag_index].get('start_token') + len(tokens):
for b_entity_index in xrange(len(tag.get('entities'))):
b_entity_name = graph_key_from_tag(tag, b_entity_index)
graph.add_edge(a_entity_name, b_entity_name)
return graph
def _sub_expand(self, tags):
"""This called by expand to find cliques
Args:
tags (list): a list of the tags used to get cliques
Yields:
list : a clique, as a list of tags sorted by start_token
"""
entities = {}
graph = self._build_graph(tags)
# name entities
for tag in tags:
for entity_index in xrange(len(tag.get('entities'))):
node_name = graph_key_from_tag(tag, entity_index)
if not node_name in entities:
entities[node_name] = []
entities[node_name] += [
tag.get('entities', [])[entity_index],
tag.get('entities', [])[entity_index].get('confidence'),
tag
]
for clique in get_cliques(list(entities), graph):
result = []
for entity_name in clique:
start_token = int(entity_name.split("-")[0])
old_tag = entities[entity_name][2]
tag = {
'start_token': start_token,
'entities': [entities.get(entity_name)[0]],
'confidence': entities.get(entity_name)[1] * old_tag.get('confidence', 1.0),
'end_token': old_tag.get('end_token'),
'match': old_tag.get('entities')[0].get('match'),
'key': old_tag.get('entities')[0].get('key'),
'from_context': old_tag.get('from_context', False)
}
result.append(tag)
result = sorted(result, key=lambda e: e.get('start_token'))
yield result
def expand(self, tags, clique_scoring_func=None):
"""This is the main function to expand tags into cliques
Args:
tags (list): a list of tags to find the cliques.
clique_scoring_func (func): a function that returns a float
value for the clique
Returns:
generator : yields parse results, each a list of non-overlapping tags
"""
lattice = Lattice()
overlapping_spans = []
def end_token_index():
return max([t.get('end_token') for t in overlapping_spans])
for i in xrange(len(tags)):
tag = tags[i]
if len(overlapping_spans) > 0 and end_token_index() >= tag.get('start_token'):
overlapping_spans.append(tag)
elif len(overlapping_spans) > 1:
cliques = list(self._sub_expand(overlapping_spans))
if clique_scoring_func:
cliques = sorted(cliques, key=lambda e: -1 * clique_scoring_func(e))
lattice.append(cliques)
overlapping_spans = [tag]
else:
lattice.append(overlapping_spans)
overlapping_spans = [tag]
if len(overlapping_spans) > 1:
cliques = list(self._sub_expand(overlapping_spans))
if clique_scoring_func:
cliques = sorted(cliques, key=lambda e: -1 * clique_scoring_func(e))
lattice.append(cliques)
else:
lattice.append(overlapping_spans)
return lattice.traverse()
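# Rough usage sketch (hypothetical names; assumes a tokenizer plus tagged entities in
# the format produced by adapt's entity tagger):
#   expander = BronKerboschExpander(tokenizer)
#   parses = expander.expand(tags,
#                            clique_scoring_func=lambda clique: sum(t['confidence'] for t in clique))
#   for parse in parses:
#       pass  # each parse is a list of non-overlapping tags ordered by start_token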
| apache-2.0 | 3,364,695,214,877,201,400 | 34.048544 | 141 | 0.574515 | false |
eReuse/DeviceHub | ereuse_devicehub/tests/test_resources/test_events/test_device_event/test_device_event_basic.py | 1 | 6536 | from datetime import timedelta
from time import sleep
from assertpy import assert_that
from bson import ObjectId
from pydash import pick
from ereuse_devicehub.exceptions import SchemaError
from ereuse_devicehub.resources.hooks import TooLateToDelete, MaterializeEvents
from ereuse_devicehub.tests import TestStandard
from ereuse_utils.naming import Naming
class TestDeviceEventBasic(TestStandard):
"""
Tests that take care of the creation and configuration of device events.
"""
def test_creation(self):
"""
Tests that device events have been created correctly, taking special care of:
- All events have been created
- @type and type
- URL
- prefix
"""
events = ('add', 'allocate', 'deallocate', 'dispose', 'free',
'locate', 'ready', 'receive', 'register', 'remove', 'repair', 'snapshot', 'test-hard-drive',
'to-dispose', 'to-repair', 'to-prepare') # all subclasses from DeviceEvent in resource type
events = ['{}{}{}'.format('devices', Naming.RESOURCE_PREFIX, event) for event in events] # we prefix them
events += ['accounts', 'devices', 'computer'] # We check some non-prefixed regular resources...
assert_that(self.domain).contains_key(*events)
# Type of snapshot should be 'devices:Snapshot'
snapshot = self.domain['{}{}{}'.format('devices', Naming.RESOURCE_PREFIX, 'snapshot')]
assert_that(snapshot['schema']['@type']['allowed']) \
.is_equal_to({'{}{}{}'.format('devices', Naming.TYPE_PREFIX, 'Snapshot')})
devices = self.domain['devices']
# And any other type not subclass from DeviceEvent should be without prefix
assert_that(devices['schema']['@type']['allowed']).contains('Device', 'Computer', 'HardDrive') # ...and more
# Checking that the url generated contains 'devices' for DeviceEvent...
assert_that(snapshot['url']).is_equal_to('events/devices/snapshot')
# ...but it doesn't add devices to others (it would be then 'devices/devices')
assert_that(devices['url']).is_equal_to('devices')
def test_delete_in_time(self):
"""Tests deleting an event only in time."""
# Let's set a small amount of time and try to delete the device after it
self.app.config['TIME_TO_DELETE_RESOURCES'] = timedelta(seconds=1)
SNAPSHOT_URL = self.DEVICE_EVENT + '/' + self.SNAPSHOT
snapshot = self.post_fixture(self.SNAPSHOT, SNAPSHOT_URL, 'xps13')
sleep(2)
response, status = self.delete(SNAPSHOT_URL, item=snapshot['_id'])
self.assert_error(response, status, TooLateToDelete)
def test_groups(self):
"""Tests a generic event with the 'groups' field set."""
# Let's create a lot and a package, both with 2 different devices
computers_id = self.get_fixtures_computers()
lot = self.get_fixture(self.GROUPS, 'lot')
lot['children']['devices'] = computers_id[0:2]
lot = self.post_201(self.LOTS, lot)
package = self.get_fixture(self.GROUPS, 'package')
package['children']['devices'] = computers_id[2:4]
package = self.post_201(self.PACKAGES, package)
# Let's post the event
READY_URL = '{}/{}'.format(self.DEVICE_EVENT, 'ready')
ready = self.get_fixture(self.GROUPS, 'ready')
ready['groups']['lots'] = [lot['_id']]
ready['groups']['packages'] = [package['_id']]
ready = self.post_201(READY_URL, ready)
self._check(lot['_id'], package['_id'], computers_id, ready['_id'])
# If we try to post an event with both devices and groups set, it should fail
ready = self.get_fixture(self.GROUPS, 'ready')
ready['devices'] = computers_id
response, status = self.post(READY_URL, ready)
self.assert_error(response, status, SchemaError)
# Now let's try with descendants
# We add one new extra device to package
snapshot = self.post_fixture(self.SNAPSHOT, '{}/{}'.format(self.DEVICE_EVENT, self.SNAPSHOT), 'vaio')
package['children']['devices'].append(snapshot['device'])
self.patch_200(self.PACKAGES, item=package['_id'], data=pick(package, 'children', '@type'))
# adding package inside lot and event with only lot. The event should be done to package and its devices
lot['children']['packages'] = [package['_id']]
self.patch_200(self.LOTS, item=lot['_id'], data=pick(lot, 'children', '@type'))
receive = self.get_fixture('receive', 'receive')
receive['groups'] = {'lots': [lot['_id']]}
receive['receiver'] = self.get_first('accounts')['_id']
receive = self.post_201('{}/{}'.format(self.DEVICE_EVENT, 'receive'), receive)
# Preparing to check
self._check(lot['_id'], package['_id'], computers_id + [snapshot['device']], receive['_id'])
# Try if label does not exist
package['children']['devices'].append('This label does not exist')
_, status = self.patch(self.PACKAGES, item=package['_id'], data=pick(package, 'children', '@type'))
self.assert422(status)
def _check(self, lot_id: str, package_id: str, computers_id: list, event_id: ObjectId):
"""Checks that the event contains the devices and groups, and otherwise."""
event = self.get_200(self.EVENTS, item=event_id)
materialized_event = pick(event, *MaterializeEvents.FIELDS)
lot = self.get_200(self.LOTS, item=lot_id)
package = self.get_200(self.PACKAGES, item=package_id)
# Both event and groups contain each other
assert_that(event['groups']).has_lots([lot_id]).has_packages([package_id])
lot = self.get_200(self.LOTS, item=lot['_id'])
assert_that(lot['events']).contains(materialized_event)
package = self.get_200(self.PACKAGES, item=package['_id'])
assert_that(package['events']).contains(materialized_event)
# Both event and devices contain each other
assert_that(event).contains('devices')
assert_that(event['devices']).contains_only(*computers_id)
for computer_id in computers_id:
computer = self.get_200(self.DEVICES, item=computer_id, embedded={'components': True})
assert_that(computer['events']).contains(materialized_event)
# Let's ensure the events have been materialized for components too
for component in computer['components']:
assert_that(component['events']).contains(materialized_event)
| agpl-3.0 | 338,278,350,866,452,600 | 52.57377 | 117 | 0.635863 | false |
LegionXI/pydarkstar | scrub.py | 1 | 5707 | """
Create item database.
"""
import logging
import os
import re
import pydarkstar.logutils
import pydarkstar.scrubbing.ffxiah
import pydarkstar.itemlist
import pydarkstar.options
import pydarkstar.common
class Options(pydarkstar.options.Options):
"""
Reads options from config file, then from command line.
"""
def __init__(self):
super(Options, self).__init__(config='scrub.yaml', description=__doc__)
self.verbose = False # error, info, and debug
self.silent = False # error only
self.stub = 'items' # output file stub
self.overwrite = False # overwrite output
self.backup = False # backup output
self.save = False # save config
self.force = False # redownload
self.pkl = False # save pkl files
self.threads = -1 # cpu threads during download
self.stock01 = 5 # default stock for singles
self.stock12 = 5 # default stock for stacks
self.itemids = [] # a list of item ids
self.urls = [] # a list of category urls
# logging
self.add_argument('--verbose', action='store_true',
help='report debug, info, and error')
self.add_argument('--silent', action='store_true',
help='report error only')
# output
self.add_argument(dest='stub', nargs='?', type=str, default=self.stub,
help='output file stub')
self.add_argument('--overwrite', action='store_true',
help='overwrite output file')
self.add_argument('--backup', action='store_true',
help='backup output file')
self.add_argument('--save', action='store_true',
help='save config file (and exit)')
# scrub parameters
self.add_argument('--force', action='store_true',
help='start from scratch')
self.add_argument('--pkl', action='store_true',
help='save pkl files')
self.add_argument('--threads', type=int, default=self.threads, metavar=self.threads,
help='number of cpu threads to use')
self.add_argument('--urls', type=str, nargs='*', action='append', default=self.urls, metavar='url',
help='a list of category urls')
self.add_argument('--itemids', type=int, nargs='*', action='append', default=self.itemids, metavar='itemids',
help='a list of item ids')
# defaults
self.add_argument('--stock01', type=int, default=self.stock01, metavar=self.stock01,
help='default stock for singles')
self.add_argument('--stock12', type=int, default=self.stock12, metavar=self.stock12,
help='default stock for stacks')
self.exclude('itemids')
self.exclude('urls')
def parse_args(self, args=None):
super(Options, self).parse_args(args)
urls = []
for obj in self.urls:
if isinstance(obj, list):
urls.extend(obj)
else:
urls.append(obj)
self.urls = urls
if not self.urls:
self.urls = None
itemids = []
for obj in self.itemids:
if isinstance(obj, list):
itemids.extend(obj)
else:
itemids.append(obj)
self.itemids = itemids
if not self.itemids:
self.itemids = None
def main():
"""
Main function.
"""
# get options
opts = Options()
opts.parse_args()
pydarkstar.logutils.basicConfig(
verbose=opts.verbose, silent=opts.silent, fname='scrub.log')
logging.info('start')
# log options
opts.log_values(level=logging.INFO)
# save options
if opts.save:
opts.save = False
opts.dump()
return
# check output file name validity
oname = os.path.abspath('{}.csv'.format(re.sub(r'\.csv$', '', opts.stub)))
if not opts.overwrite and not opts.backup:
if os.path.exists(oname):
logging.error('output file already exists!\n\t%s', oname)
logging.error('please use --overwrite or --backup')
exit(-1)
# scrub data
scrubber = pydarkstar.scrubbing.ffxiah.FFXIAHScrubber()
scrubber.save = opts.pkl
data = scrubber.scrub(force=opts.force, threads=opts.threads, urls=opts.urls, ids=opts.itemids)
# create item list from data
ilist = pydarkstar.itemlist.ItemList()
for itemid in data:
# singles
try:
price01, sell01 = data[itemid]['median'], True
# do not sell items without a price
if price01 <= 0:
price01, sell01 = None, False
except KeyError:
price01, sell01 = None, False
# stacks
try:
price12, sell12 = data[itemid]['stack price'], True
# do not sell items without a price
if price12 <= 0:
price12, sell12 = None, False
except KeyError:
price12, sell12 = None, False
# the name doesn't really matter
try:
name = data[itemid]['name']
except KeyError:
name=None
ilist.add(itemid, name=name,
price01=price01, stock01=opts.stock01, sell01=sell01, buy01=True,
price12=price12, stock12=opts.stock12, sell12=sell12, buy12=True)
# backup file
if opts.backup:
pydarkstar.common.backup(oname)
# overwrites if exists, but we checked already
ilist.savecsv(oname)
def cleanup():
logging.info('exit\n')
if __name__ == '__main__':
with pydarkstar.logutils.capture():
main()
cleanup()
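# Example invocation (illustrative; the option values are assumptions):
#   python scrub.py items --verbose --threads 4 --stock01 5 --stock12 5 --overwrite
# This scrubs FFXIAH price data and writes the resulting item list to items.csv,
# overwriting or backing up an existing file according to the flags given.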
| mit | -6,963,018,258,926,190,000 | 31.061798 | 117 | 0.574908 | false |
RickyCook/AliasKeepr | aliaskeepr.py | 1 | 2237 | #!/usr/bin/env python
import argparse
import os
import re
import shutil
import sys
from ConfigParser import SafeConfigParser
PARSER = argparse.ArgumentParser(description="Import some aliases")
PARSER.add_argument('profile', help="Profile to output config for")
PARSER.add_argument('-c', '--config',
default='~/.akrc',
help="Directory where profiles are stored")
PARSER.add_argument('--init-alias',
default='ak',
help="When using the 'init' profile, the alias "
"name to insert")
ALIAS_RE = re.compile('^[A-Za-z0-9 _-]+$')
def profile_filename(config_dir, profile_name):
return os.path.expanduser('%s/%s.ini' % (config_dir, profile_name))
def main():
args = PARSER.parse_args()
if args.profile == 'init':
write_init_profile(args.config, args.init_alias)
profile_fn = profile_filename(args.config, args.profile)
profile_commands_dir = os.path.expanduser('%s/.%s' % (args.config, args.profile))
sys.stderr.write("Using profile in '%s'\n" % profile_fn)
config = SafeConfigParser()
config.read(profile_fn)
try:
shutil.rmtree(profile_commands_dir, ignore_errors=True)
except OSError:
pass
os.mkdir(profile_commands_dir)
for alias, command in config.items('aliases'):
if not ALIAS_RE.match(alias):
sys.stderr.write("Alias '%s' not allowed; skipped\n" % alias)
continue
if '$@' not in command:
command = '%s "$@"' % command
command_fn = '%s/%s' % (profile_commands_dir, alias)
with open(command_fn, 'w') as handle:
handle.write(command)
print "function '%s' { eval \"$(cat '%s')\" }" % (alias, command_fn)
print
print '# USAGE: eval "$("%s" "%s")"' % (__file__, args.profile)
def write_init_profile(config_dir, init_alias):
try:
os.mkdir(os.path.expanduser(config_dir))
except OSError:
pass
my_abs_path = os.path.abspath(os.path.expanduser(__file__))
with open(profile_filename(config_dir, 'init'), 'w') as handle:
handle.write('[aliases]\n')
handle.write('{init_alias} = eval "$("{my_path}" "$@")"'.format(
init_alias=init_alias,
my_path=my_abs_path,
))
if __name__ == '__main__':
main()
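# Illustrative profile (assumed contents of ~/.akrc/git.ini, not part of this script):
#   [aliases]
#   gs = git status
#   gl = git log --oneline
# Running eval "$(./aliaskeepr.py git)" then defines shell functions `gs` and `gl`
# that evaluate the stored commands.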
| mit | -4,410,401,499,480,091,600 | 25.630952 | 83 | 0.621815 | false |
blacksea3/medit | medit/blog/urls.py | 1 | 1191 | from django.conf.urls import url
from . import views
urlpatterns=[
url(r'^$', views.test, name='test'),
url(r'^test/$', views.test, name='test'),
url(r'^login/$', views.login, name='login'),
url(r'^index/$', views.index, name='index'),
url(r'^block-add/$', views.block_add, name='block_add'),
url(r'^block-add.html/$', views.block_add, name='block_add'),
url(r'^block-list.html/$', views.block_list, name='block_list'),
url(r'^block-edit/$', views.block_edit, name='block_edit'),
url(r'^block-edit.html/$', views.block_edit, name='block_edit'),
url(r'^block-del/$', views.block_del, name='block_del'),
url(r'^article-add/$', views.article_add, name='article_add'),
url(r'^article-add.html/$', views.article_add, name='article_add'),
url(r'^article-list.html/$', views.article_list, name='article_list'),
url(r'^article-edit/$', views.article_edit, name='article_edit'),
url(r'^article-edit.html/$', views.article_edit, name='article_edit'),
url(r'^article-del/$', views.article_del, name='article_del'),
url(r'^article-upload-file/$', views.article_upload_file, name='article_upload_file'),
url(r'^ck-upload-img/$', views.ck_upload_img, name='ck_upload_img'),
]
| apache-2.0 | 4,919,236,522,956,086,000 | 46.64 | 87 | 0.660789 | false |
michaelimfeld/notipy-server | notipyserver/backends/telegram/userregistration.py | 1 | 1411 | """
`notipyserver` - User-Notification-Framework server
Provides a telegram handler function
for user registration.
:copyright: (c) by Michael Imfeld
:license: MIT, see LICENSE for details
"""
import telegram
from .usermanager import add_user, add_group
def register(bot, update):
"""
Saves the telegram username and the chat_id from the given
update object to a file.
Args:
bot (telegram.Bot): The bot instance.
update (telegram.Update): The message update.
"""
if update.message.chat.type == "group":
recipient_name = update.message.chat.title
register_function = add_group
name = update.message.chat.title
else:
if not update.message.chat.username:
message = "Please setup a telegram username to use this bot."
bot.sendMessage(chat_id=update.message.chat_id, text=message)
return
recipient_name = update.message.chat.username
register_function = add_user
name = update.message.chat.first_name
is_new = register_function(recipient_name, update.message.chat_id)
if is_new:
message = """
Hi {}!
Your registration was *successful* 🎉.
""".format(name).strip()
else:
message = "Already registered!"
bot.sendMessage(
chat_id=update.message.chat_id,
text=message,
parse_mode=telegram.ParseMode.MARKDOWN)
| mit | -8,052,298,776,417,543,000 | 27.16 | 73 | 0.653409 | false |
tracon/dragontail | site_specific/tracon2016/management/commands/setup_tracon2016.py | 1 | 6630 | # encoding: utf-8
from __future__ import print_function, unicode_literals
from datetime import datetime, timedelta, date
from django.core.files import File
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.utils.timezone import now
from wagtail.wagtailcore.models import Site
from dragontail.content.models import BasicPage, TemplateSettings
class Command(BaseCommand):
args = ''
help = 'Setup example content'
def add_arguments(self, parser):
parser.add_argument('hostname', type=str)
def handle(self, *args, **options):
Setup(hostname=options['hostname']).setup()
class Setup(object):
def __init__(self, hostname):
self.hostname = hostname
def setup(self):
print('NOTE: Setting up Tracon (2016) site at {hostname}'.format(hostname=self.hostname))
self.setup_content()
# self.setup_ads()
# self.setup_blog()
def setup_content(self):
t = now()
is_default_site = not Site.objects.exists()
self.root_page, unused = BasicPage.objects.get_or_create(
slug='index',
defaults=dict(
title='Tracon Tampere-talossa 3.–4. syyskuuta 2016',
depth=0
)
)
self.site, unused = Site.objects.get_or_create(hostname=self.hostname, defaults=dict(
is_default_site=is_default_site,
root_page=self.root_page,
))
self.template_settings, unused = TemplateSettings.objects.get_or_create(
site=self.site,
defaults=dict(
base_template='tracon11_base.jade',
basic_page_template='tracon11_page.jade',
blog_index_template='tracon11_blog_index.jade',
blog_post_template='tracon11_blog_post.jade',
)
)
return
ordering = 0
for page_slug, page_title, child_pages in [
('front-page', 'Tracon Tampere-talossa 3.–4. syyskuuta 2016', []),
('blog', 'Ajankohtaista', []), # pseudo page for menu, actually taken over by blog
('tapahtuma', 'Tapahtuma', [
('tyovoima', 'Vänkäriksi'),
('jarjestyssaannot', 'Järjestyssäännöt'),
('tapahtumapaikka', 'Tapahtumapaikka'),
]),
('ohjelma', 'Ohjelma', [
('ohjelmanjarjestajaksi', 'Ohjelmanjärjestäjäksi'),
]),
('liput', 'Liput', []),
('yhteys', 'Ota yhteyttä!', [
('conitea', 'Järjestäjät'),
('media', 'Tiedotusvälineille'),
('sponsorit', 'Yhteistyökumppaneille'),
])
]:
ordering += 10
parent_page, unused = Page.objects.get_or_create(
site=self.site,
parent=None,
slug=page_slug,
defaults=dict(
title=page_title,
body='Placeholder for {slug}'.format(slug=page_slug),
public_from=t,
visible_from=t,
order=ordering,
)
)
# v2
child_ordering = 0
for child_slug, child_title in child_pages:
child_ordering += 10
child_page, unused = Page.objects.get_or_create(
site=self.site,
parent=parent_page,
slug=child_slug,
defaults=dict(
title=child_title,
body='Placeholder for {slug}'.format(slug=child_slug),
public_from=t,
visible_from=t,
order=child_ordering,
)
)
# v2
if child_page.order == 0:
child_page.order = child_ordering
child_page.save()
front_page = Page.objects.get(site=self.site, slug='front-page')
if not front_page.override_menu_text:
front_page.override_menu_text = 'Etusivu'
# v11
if not front_page.override_page_template:
front_page.override_page_template = 'tracon11_front_page.jade'
if not front_page.page_controller_code or front_page.page_controller_code == 'events.tracommon.views:front_page_controller':
front_page.page_controller_code = 'site_specific.tracommon.views:front_page_controller'
front_page.save()
for path, target in [
('admin', '/admin/'),
]:
redirect, unused = Redirect.objects.get_or_create(
site=self.site,
path=path,
defaults=dict(
target=target
),
)
def setup_ads(self):
for banner_title, banner_url, banner_path in [
('Säätöyhteisö B2 ry', 'http://b2.fi', 'site_specific/tracon11/static/tracon11/img/b2-saatoa2008-wh-200.png'),
]:
try:
Banner.objects.get(sites=self.site, url=banner_url)
except Banner.DoesNotExist:
with open(banner_path, 'rb') as banner_file:
banner = Banner(
title=banner_title,
url=banner_url,
image_file=File(banner_file),
)
banner.save()
banner.sites = [self.site,]
banner.save()
def setup_blog(self):
"""
Set up a stub of the blog.tracon.fi site required by the front page blog box.
"""
blog_site, unused = Site.objects.get_or_create(hostname='blog.tracon.fi', defaults=dict(
name='Traconin blogi'
))
blog_site_settings, unused = SiteSettings.objects.get_or_create(site=blog_site, defaults=dict(
base_template='tracon11_base.jade',
page_template='tracon11_page.jade',
blog_index_template='tracon11_blog_index.jade',
blog_post_template='tracon11_blog_post.jade',
))
for category_slug, category_title in [
('conzine', 'Conzine'),
('palaute', 'Palaute'),
('jarjestaminen', 'Traconin järjestäminen'),
]:
BlogCategory.objects.get_or_create(
site=blog_site,
slug=category_slug,
defaults=dict(
title=category_title,
)
)
| mit | 6,052,349,529,121,051,000 | 33.581152 | 132 | 0.521575 | false |
mdoucet/reflectivity_ui | test/notebooks/event_reduction.py | 1 | 18262 | import sys
import time
import multiprocessing
import mantid.simpleapi as api
import numpy as np
from reflectivity_ui.interfaces.data_handling import instrument
def load_data(run="REF_M_30769"):
if run.startswith("/SNS"):
filepath = run
else:
filepath = '/SNS/REF_M/IPTS-21391/nexus/' + run + '.nxs.h5'
_instrument = instrument.Instrument()
ws_list = _instrument.load_data(filepath)
_n_counts = 0
_high_count_ws = None
for _ws in ws_list:
_i_counts = _ws.getNumberEvents()
if _n_counts < _i_counts:
_n_counts = _i_counts
_high_count_ws = _ws
return _high_count_ws
def get_peak(center, width, max_pixel=None):
peak_min = int(round(float(center) - float(width)/2.0))
peak_max = int(round(float(center) + float(width)/2.0+1.0))
if max_pixel is not None:
if peak_min < 0: peak_min = 0
if peak_max >= max_pixel: peak_max = max_pixel-1
return peak_min, peak_max
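# --- Illustrative sketch (added for clarity; values are made up) ---
# get_peak turns a (center, width) specification into a pixel range and can
# clip it to the detector size.
def _get_peak_example():
    assert get_peak(128, 10) == (123, 134)
    # Clipped so the range stays inside a 256-pixel detector:
    assert get_peak(253, 10, max_pixel=256) == (248, 255)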
def get_wl_range(ws):
"""
Determine TOF range from the data
:param workspace ws: workspace to work with
"""
run_object = ws.getRun()
wl = run_object.getProperty('LambdaRequest').value[0]
chopper_speed = run_object.getProperty('SpeedRequest1').value[0]
# Cut the edges by using a width of 2.6 A
wl_min = (wl - 1.3 * 60.0 / chopper_speed)
wl_max = (wl + 1.3 * 60.0 / chopper_speed)
return [wl_min, wl_max]
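# --- Illustrative sketch (added; not part of the original module) ---
# get_wl_range only reads LambdaRequest and SpeedRequest1 from the run logs,
# so a small stand-in object is enough to show the arithmetic; the values
# below (4.25 A requested wavelength, 60 Hz chopper) are made up.
def _get_wl_range_example():
    class _FakeProperty(object):
        def __init__(self, value):
            self.value = value
    class _FakeRun(object):
        _logs = {'LambdaRequest': [4.25], 'SpeedRequest1': [60.0]}
        def getProperty(self, name):
            return _FakeProperty(self._logs[name])
    class _FakeWorkspace(object):
        def getRun(self):
            return _FakeRun()
    # 60 Hz chopper -> LambdaRequest +/- 1.3 A, i.e. [2.95, 5.55]
    return get_wl_range(_FakeWorkspace())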
def get_q_binning(q_min=0.001, q_max=0.15, q_step=-0.02):
if q_step > 0:
n_steps = np.int((q_max-q_min)/q_step)
return q_min + np.asarray([q_step * i for i in range(n_steps)])
else:
_step = 1.0+np.abs(q_step)
n_steps = np.int(np.log(q_max/q_min)/np.log(_step))
return q_min * np.asarray([_step**i for i in range(n_steps)])
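# --- Illustrative sketch (added; not part of the original module) ---
# A positive q_step produces a linear grid starting at q_min; a negative
# q_step is read as a constant dQ/Q (logarithmic) binning with ratio
# 1 + |q_step|.  (The implementation above uses np.int, so this assumes a
# numpy version that still provides that alias.)
def _get_q_binning_example():
    linear = get_q_binning(q_min=0.01, q_max=0.05, q_step=0.01)  # 0.01, 0.02, 0.03, 0.04
    log = get_q_binning(q_min=0.01, q_max=0.15, q_step=-0.02)    # 0.01 * 1.02**i
    return linear, log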
def quicknxs_scale(theta, peak, low_res, norm_peak, norm_low_res):
"""
Scaling factor to multiply by to be compatible with QuickNXS 1.0.
"""
quicknxs_scale = (float(norm_peak[1])-float(norm_peak[0])) * (float(norm_low_res[1])-float(norm_low_res[0]))
quicknxs_scale /= (float(peak[1])-float(peak[0])) * (float(low_res[1])-float(low_res[0]))
_scale = 0.005 / np.sin(theta) if theta > 0.0002 else 1.0
quicknxs_scale *= _scale
return quicknxs_scale
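# --- Worked example (added; numbers are made up) ---
# When the reflected- and direct-beam ROIs have the same widths, the area
# ratio is 1 and the factor reduces to 0.005/sin(theta).
def _quicknxs_scale_example():
    scale = quicknxs_scale(0.01, peak=(120, 130), low_res=(50, 200),
                           norm_peak=(115, 125), norm_low_res=(50, 200))
    # 0.005 / sin(0.01 rad) ~= 0.50
    return scale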
class EventReflectivity(object):
"""
    Event-based reflectivity calculation.
List of items to be taken care of outside this class:
- Edge points cropping
- Calculate theta using SANGLE or not
- Angle offset
- Direct pixel overwrite
- DANGLE0 overwrite
Options that are left out:
- rounding up pixel to assign the proper Qx
"""
QX_VS_QZ = 0
KZI_VS_KZF = 1
DELTA_KZ_VS_QZ = 3
INSTRUMENT_4A = 0
INSTRUMENT_4B = 1
def __init__(self, scattering_workspace, direct_workspace,
signal_peak, signal_bck, norm_peak, norm_bck,
specular_pixel, signal_low_res, norm_low_res,
q_min=None, q_step=-0.02, q_max=None,
tof_range=None, theta=1.0, sample_length=10,
instrument=None):
"""
Pixel ranges include the min and max pixels.
:param scattering_workspace: Mantid workspace containing the reflected data
:param direct_workspace: Mantid workspace containing the direct beam data [if None, normalization won't be applied]
:param signal_peak: pixel min and max for the specular peak
:param signal_bck: pixel range of the background [if None, the background won't be subtracted]
:param norm_peak: pixel range of the direct beam peak
:param norm_bck: pixel range of the direct beam background [if None, the background won't be subtracted]
:param specular_pixel: pixel of the specular peak
:param signal_low_res: pixel range of the specular peak out of the scattering plane
:param norm_low_res: pixel range of the direct beam out of the scattering plane
:param q_min: value of lowest q point
:param q_step: step size in Q. Enter a negative value to get a log scale
        :param q_max: value of largest q point
        :param tof_range: TOF range, or None
:param theta: theta scattering angle in radians
:param sample_length: sample size, for resolution calculation
"""
if instrument in [self.INSTRUMENT_4A, self.INSTRUMENT_4B]:
self.instrument = instrument
else:
self.instrument = self.INSTRUMENT_4A
self.signal_peak = signal_peak
self.signal_bck = signal_bck
self.norm_peak = norm_peak
self.norm_bck = norm_bck
self.signal_low_res = signal_low_res
self.norm_low_res = norm_low_res
self.specular_pixel = specular_pixel
self.q_min = q_min
self.q_max = q_max
self.q_step = q_step
self.tof_range = tof_range
self.theta = theta
self.sample_length = sample_length
self._offspec_x_bins = None
self._offspec_z_bins = None
# Process workspaces
if self.tof_range is not None:
self._ws_sc = api.CropWorkspace(InputWorkspace=scattering_workspace,
XMin=tof_range[0], XMax=tof_range[1],
OutputWorkspace='_'+str(scattering_workspace))
self._ws_db = api.CropWorkspace(InputWorkspace=direct_workspace,
XMin=tof_range[0], XMax=tof_range[1],
OutputWorkspace='_'+str(direct_workspace))
else:
self._ws_sc = scattering_workspace
self._ws_db = direct_workspace
# Extract meta data
self.extract_meta_data()
def extract_meta_data(self):
# Set up basic data
self.n_x = int(self._ws_sc.getInstrument().getNumberParameter("number-of-x-pixels")[0])
self.n_y = int(self._ws_sc.getInstrument().getNumberParameter("number-of-y-pixels")[0])
self.pixel_width = float(self._ws_sc.getInstrument().getNumberParameter("pixel-width")[0]) / 1000.0
if self.instrument == self.INSTRUMENT_4B:
self.extract_meta_data_4B()
else:
self.extract_meta_data_4A()
h = 6.626e-34 # m^2 kg s^-1
m = 1.675e-27 # kg
self.constant = 1e-4 * m * self.source_detector_distance / h
if self.tof_range is None:
self.wl_range = get_wl_range(self._ws_sc)
else:
self.wl_range = [self.tof_range[0] / self.constant, self.tof_range[1] / self.constant]
if self.q_min is None:
self.q_min = 4.0*np.pi/self.wl_range[1] * np.sin(self.theta)
if self.q_max is None:
self.q_max = 4.0*np.pi/self.wl_range[0] * np.sin(self.theta)
# Q binning to use
self.q_bins = get_q_binning(self.q_min, self.q_max, self.q_step)
def extract_meta_data_4A(self):
run_object = self._ws_sc.getRun()
self.det_distance = run_object['SampleDetDis'].getStatistics().mean
source_sample_distance = run_object['ModeratorSamDis'].getStatistics().mean
if not run_object['SampleDetDis'].units in ['m', 'meter']:
self.det_distance /= 1000.0
if not run_object['ModeratorSamDis'].units in ['m', 'meter']:
source_sample_distance /= 1000.0
self.source_detector_distance = source_sample_distance + self.det_distance
def extract_meta_data_4B(self):
self.det_distance = 1.83
source_sample_distance = 13.63
self.source_detector_distance = source_sample_distance + self.det_distance
def __repr__(self):
output = "sample-det: %s\n" % self.det_distance
output += "pixel: %s\n" % self.pixel_width
output += "WL: %s %s\n" % (self.wl_range[0], self.wl_range[1])
output += "Q: %s %s\n" % (self.q_min, self.q_max)
output += "Theta = %s" % self.theta
return output
def specular(self, q_summing=True):
# Scattering data
refl, d_refl = self._reflectivity(self._ws_sc, peak_position=self.specular_pixel,
peak=self.signal_peak, low_res=self.signal_low_res,
theta=self.theta, q_summing=q_summing)
norm, d_norm = self._reflectivity(self._ws_db, peak_position=0,
peak=self.norm_peak, low_res=self.norm_low_res,
theta=self.theta, q_summing=False)
if False and self.norm_bck is not None:
norm_bck, d_norm_bck = self._norm_bck_in_pixel()
norm -= norm_bck
d_norm = np.sqrt(d_norm**2 + d_norm_bck**2)
db_bins = norm>0
if False and self.signal_bck is not None:
refl_bck, d_refl_bck = self._signal_bck_in_pixel()
refl -= refl_bck
d_refl = np.sqrt(d_refl**2 + d_refl_bck**2)
self.refl_bck = refl_bck[db_bins]/norm[db_bins]
self.d_refl_bck = np.sqrt(d_refl_bck[db_bins]**2 / norm[db_bins]**2 + refl_bck[db_bins]**2 * d_norm[db_bins]**2 / norm[db_bins]**4)
refl[db_bins] = refl[db_bins]/norm[db_bins]
d_refl[db_bins] = np.sqrt(d_refl[db_bins]**2 / norm[db_bins]**2 + refl[db_bins]**2 * d_norm[db_bins]**2 / norm[db_bins]**4)
self.refl = refl
self.d_refl = d_refl
return self.q_bins, refl, d_refl
def _signal_bck_in_pixel(self, normalize_to_single_pixel=False, q_bins=None):
q_bins = self.q_bins if q_bins is None else q_bins
refl_bck, d_refl_bck = self._reflectivity(self._ws_sc, peak_position=0, q_bins=q_bins,
peak=self.signal_bck, low_res=self.signal_low_res,
theta=self.theta, q_summing=False)
_pixel_area = (self.signal_bck[1]-self.signal_bck[0]+1.0)
if not normalize_to_single_pixel:
_pixel_area /= (self.signal_peak[1]-self.signal_peak[0]+1.0)
refl_bck /= _pixel_area
d_refl_bck /= _pixel_area
return refl_bck, d_refl_bck
def _norm_bck_in_pixel(self, q_bins=None):
if q_bins is None:
q_bins = self.q_bins
norm_bck, d_norm_bck = self._reflectivity(self._ws_db, peak_position=0, q_bins=q_bins,
peak=self.norm_bck, low_res=self.norm_low_res,
theta=self.theta, q_summing=False)
_pixel_area = (self.norm_bck[1]-self.norm_bck[0]+1.0) / (self.norm_peak[1]-self.norm_peak[0]+1.0)
norm_bck /= _pixel_area
d_norm_bck /= _pixel_area
return norm_bck, d_norm_bck
def slice(self, x_min=0.002, x_max=0.004, x_bins=None, z_bins=None,
refl=None, d_refl=None, normalize=False):
x_bins = self._offspec_x_bins if x_bins is None else x_bins
z_bins = self._offspec_z_bins if z_bins is None else z_bins
refl = self._offspec_refl if refl is None else refl
d_refl = self._offspec_d_refl if d_refl is None else d_refl
i_min = len(x_bins[x_bins<x_min])
i_max = len(x_bins[x_bins<x_max])
_spec = np.sum(refl[i_min:i_max], axis=0)
_d_spec = np.sum( (d_refl[i_min:i_max])**2, axis=0)
_d_spec = np.sqrt(_d_spec)
if normalize:
_spec /= (i_max-i_min)
_d_spec /= (i_max-i_min)
return z_bins, _spec, _d_spec
def _reflectivity(self, ws, peak_position, peak, low_res, theta, q_bins=None, q_summing=False):
"""
Assumes that the input workspace is normalized by proton charge.
"""
charge = ws.getRun()['gd_prtn_chrg'].value
_q_bins = self.q_bins if q_bins is None else q_bins
refl = np.zeros(len(_q_bins)-1)
_pixel_width = self.pixel_width if q_summing else 0.0
for i in range(low_res[0], int(low_res[1]+1)):
for j in range(peak[0], int(peak[1]+1)):
if self.instrument == self.INSTRUMENT_4A:
pixel = j * self.n_y + i
else:
pixel = i * self.n_y + j
evt_list = ws.getSpectrum(pixel)
if evt_list.getNumberEvents() == 0:
continue
wl_list = evt_list.getTofs() / self.constant
x_distance = _pixel_width * (peak_position - j)
delta_theta_f = np.arctan(x_distance / self.det_distance) / 2.0
qz=4.0*np.pi/wl_list * np.sin(theta + delta_theta_f) * np.cos(delta_theta_f)
_counts, _ = np.histogram(qz, bins=_q_bins)
refl += _counts
d_refl_sq = np.sqrt(refl) / charge
refl /= charge
return refl, d_refl_sq
def _get_events(self, ws, peak, low_res):
"""
Return an array of wavelengths for a given workspace.
"""
wl_events = np.asarray([])
for i in range(low_res[0], int(low_res[1]+1)):
for j in range(peak[0], int(peak[1]+1)):
if self.instrument == self.INSTRUMENT_4A:
pixel = j * self.n_y + i
else:
pixel = i * self.n_y + j
evt_list = ws.getSpectrum(pixel)
wl_list = evt_list.getTofs() / self.constant
wl_events = np.concatenate((wl_events, wl_list))
return wl_events
def off_specular(self, x_axis=None, x_min=-0.015, x_max=0.015, x_npts=50,
z_min=None, z_max=None, z_npts=-120, bck_in_q=None):
"""
Compute off-specular
:param x_axis: Axis selection
:param x_min: Min value on x-axis
:param x_max: Max value on x-axis
:param x_npts: Number of points in x (negative will produce a log scale)
:param z_min: Min value on z-axis (if none, default Qz will be used)
:param z_max: Max value on z-axis (if none, default Qz will be used)
        :param z_npts: Number of points in z (negative will produce a log scale)
        :param bck_in_q: Qx range [min, max] used to estimate the background to subtract (only used when signal_bck is set)
"""
# Z axis binning
qz_bins = self.q_bins
if z_min is not None and z_max is not None:
if z_npts < 0:
qz_bins = np.logspace(np.log10(z_min), np.log10(z_max), num=np.abs(z_npts))
else:
qz_bins = np.linspace(z_min, z_max, num=z_npts)
# X axis binning
if x_npts > 0:
qx_bins = np.linspace(x_min, x_max, num=x_npts)
else:
qx_bins = np.logspace(np.log10(x_min), np.log10(x_max), num=np.abs(x_npts))
wl_events = self._get_events(self._ws_db, self.norm_peak, self.norm_low_res)
wl_dist, wl_bins = np.histogram(wl_events, bins=60)
wl_middle = [(wl_bins[i+1]+wl_bins[i])/2.0 for i in range(len(wl_bins)-1)]
_refl, _d_refl = self._off_specular(self._ws_sc, wl_dist, wl_middle, qx_bins, qz_bins,
self.specular_pixel, self.theta, x_axis=x_axis)
db_charge = self._ws_db.getRun()['gd_prtn_chrg'].value
_refl *= db_charge * (wl_bins[1]-wl_bins[0])
_d_refl *= db_charge * (wl_bins[1]-wl_bins[0])
# Background
if self.signal_bck:
if bck_in_q is None:
print("Not implemented")
#refl_bck, d_refl_bck = self._signal_bck_in_pixel(normalize_to_single_pixel=True, q_bins=qz_bins)
else:
_, refl_bck, d_refl_bck = self.slice(bck_in_q[0], bck_in_q[1],
x_bins=qx_bins, z_bins=qz_bins,
refl=_refl, d_refl=_d_refl,
normalize=True)
_refl -= refl_bck
_d_refl = np.sqrt(_d_refl**2 + d_refl_bck**2)
self._offspec_x_bins = qx_bins
self._offspec_z_bins = qz_bins
self._offspec_refl = _refl
self._offspec_d_refl = _d_refl
return qx_bins, qz_bins, _refl, _d_refl
def _off_specular(self, ws, wl_dist, wl_bins, x_bins, z_bins, peak_position, theta, x_axis=None):
charge = ws.getRun()['gd_prtn_chrg'].value
refl = np.zeros([len(x_bins)-1, len(z_bins)-1])
counts = np.zeros([len(x_bins)-1, len(z_bins)-1])
for j in range(0, self.n_x):
wl_list = np.asarray([])
for i in range(self.signal_low_res[0], int(self.signal_low_res[1]+1)):
if self.instrument == self.INSTRUMENT_4A:
pixel = j * self.n_y + i
else:
pixel = i * self.n_y + j
evt_list = ws.getSpectrum(pixel)
wl_events = evt_list.getTofs() / self.constant
wl_list = np.concatenate((wl_events, wl_list))
k = 2.0 * np.pi / wl_list
wl_weights = 1.0/np.interp(wl_list, wl_bins, wl_dist, np.inf, np.inf)
x_distance = float(peak_position-j) * self.pixel_width
delta_theta_f = np.arctan(x_distance / self.det_distance)
theta_f = theta + delta_theta_f
qz = k * (np.sin(theta_f) + np.sin(theta))
qx = k * (np.cos(theta_f) - np.cos(theta))
ki_z = k * np.sin(theta)
kf_z = k * np.sin(theta_f)
_x = qx
_z = qz
if x_axis == EventReflectivity.DELTA_KZ_VS_QZ:
_x = (ki_z - kf_z)
elif x_axis == EventReflectivity.KZI_VS_KZF:
_x = ki_z
_z = kf_z
            histo_weights = wl_weights * _z / wl_list
            _counts, _, _ = np.histogram2d(_x, _z, bins=[x_bins, z_bins], weights=histo_weights)
refl += _counts
_counts, _, _ = np.histogram2d(_x, _z, bins=[x_bins, z_bins])
counts += _counts
bin_size = z_bins[1] - z_bins[0]
d_refl_sq = refl / np.sqrt(counts) / charge / bin_size
refl /= charge * bin_size
return refl, d_refl_sq
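# --- Usage sketch (added for clarity; not executed on import) ---
# Putting the module together: load a reflected-beam and a direct-beam run
# with Mantid, then reduce to R(Q).  The run numbers, pixel ranges and angle
# below are placeholders, not values from any real experiment.
def _event_reflectivity_example():
    ws_sc = load_data("REF_M_30769")   # reflected beam
    ws_db = load_data("REF_M_30770")   # direct beam (placeholder run number)
    refl = EventReflectivity(ws_sc, ws_db,
                             signal_peak=[120, 130], signal_bck=[30, 50],
                             norm_peak=[115, 125], norm_bck=[30, 50],
                             specular_pixel=125.5,
                             signal_low_res=[50, 200], norm_low_res=[50, 200],
                             theta=0.01, instrument=EventReflectivity.INSTRUMENT_4B)
    q_bins, r, dr = refl.specular(q_summing=False)
    return q_bins, r, dr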
| apache-2.0 | -5,111,205,279,837,215,000 | 41.078341 | 143 | 0.544464 | false |
hajicj/safire | scripts/profile-training.py | 1 | 3750 | #!/usr/bin/env python
"""Testing script for the loader-setup-learner scenario. Runs a miniature
experiment."""
import argparse
import cProfile
import logging
import os
import pstats
import StringIO
from gensim import corpora
from safire.data.loaders import MultimodalDatasetLoader
from safire.learning.models.logistic_regression import LogisticRegression
from safire.learning.learners.base_sgd_learner import BaseSGDLearner
def profile_run(learner, model_handle, dataset):
pr = cProfile.Profile()
pr.enable()
learner.run(model_handle, dataset)
pr.disable()
s = StringIO.StringIO()
    sortby = 'tottime'
    ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats(.1)
return s.getvalue()
def main(args):
serializer = corpora.MmCorpus
if args.serializer:
if args.serializer == 'SvmLight':
serializer = corpora.SvmLightCorpus
elif args.serializer == 'Blei':
serializer = corpora.BleiCorpus
elif args.serializer == 'Low':
serializer = corpora.LowCorpus
        elif args.serializer == 'Mm':
serializer = corpora.MmCorpus
logging.info('Initializing loader...')
loader = MultimodalDatasetLoader(args.root, args.name,
text_serializer=serializer)
logging.info('Loading dataset...')
dataset = loader.load(text_infix=args.text_label, img_infix=args.img_label)
dataset.set_mode(1)
logging.info('Setting up model...')
model_handle = LogisticRegression.setup(dataset, batch_size=args.batch_size)
logging.info('Setting up learner...')
learner = BaseSGDLearner(n_epochs=args.n_epochs, b_size=args.batch_size,
validation_frequency=args.validation_frequency)
logging.info('Running learner with profiling...')
profiler_results = profile_run(learner, model_handle, dataset)
print profiler_results
def build_argument_parser():
parser = argparse.ArgumentParser(description = __doc__, add_help=True)
parser.add_argument('-r', '--root', required=True,
help='The root dataset directory, passed to Loader.')
parser.add_argument('-n', '--name', required=True,
help='The name passed to Loader.')
parser.add_argument('--text_label', default=None, help='Text corpus label.')
parser.add_argument('--img_label', default=None, help='Image corpus label.')
parser.add_argument('-b', '--batch_size', type=int, default=1,
help='SGD batch size')
parser.add_argument('-e', '--n_epochs', type=int, default=5,
help='Number of SGD epochs.')
parser.add_argument('-f', '--validation_frequency', type=int, default=3,
help='Validation will be run once every -v batches.')
parser.add_argument('--serializer', help='Use this gensim.corpora class'+
' to load the serialized text corpora. Accepts: Mm,'+
                        ' Blei, SvmLight, Low; defaults to MmCorpus')
parser.add_argument('-v', '--verbose', action='store_true',
help='Will output INFO messages.')
parser.add_argument('--debug', action='store_true',
help='Will output DEBUG messages.')
return parser
def _set_logging(args):
level = logging.WARN
if args.debug:
level = logging.DEBUG
elif args.verbose:
level = logging.INFO
logging.basicConfig(format='%(levelname)s : %(message)s', level=level)
####################################################
if __name__ == '__main__':
parser = build_argument_parser()
args = parser.parse_args()
_set_logging(args)
main(args)
| gpl-3.0 | 4,754,184,727,170,040,000 | 30.25 | 80 | 0.623467 | false |
peter17/pijnu | test/timer.py | 1 | 1850 | # -*- coding: utf8 -*-
from __future__ import print_function
'''
Copyright 2009 Denis Derman <[email protected]> (former developer)
Copyright 2011-2012 Peter Potrowl <[email protected]> (current developer)
This file is part of Pijnu.
Pijnu is free software: you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Pijnu is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with Pijnu. If not, see <http://www.gnu.org/licenses/>.
'''
'''
Overall timer func -- just case needed
'''
from time import time
__all__ = ['timer']
def timer(n, f, *args, **kw_args):
t0 = time()
for i in range(n):
f(*args, **kw_args)
t = time() - t0
arg_str = ','.join(repr(arg) for arg in args)
kw_arg_str = (',' + str(kw_args)[1:-1]) if kw_args else ''
print("%s(%s%s) %s time(s) <--> %0.3f s" \
% (f.__name__, arg_str, kw_arg_str, n, t))
########## test ##########
if __name__ == "__main__":
def sum2(x, y):
return x + y
timer(100000, sum2, 2, 3)
from math import sqrt
def mean(seq, geom=False):
def sum(seq):
sum = 0
for x in seq:
sum += x
return sum
if geom:
squares = (x * x for x in seq)
return sqrt(sum(squares))
else:
return sum(seq) / len(seq)
seq = [1, 2, 3, 4, 5, 6, 7, 8, 9]
timer(100000, mean, seq)
timer(100000, mean, seq, geom=True)
| gpl-3.0 | 4,233,844,960,566,852,600 | 26.61194 | 74 | 0.603243 | false |
AlexanderSk/fail2ban | fail2ban/server/jail.py | 1 | 6789 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Author: Cyril Jaquier
__author__ = "Cyril Jaquier, Lee Clemens, Yaroslav Halchenko"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2011-2012 Lee Clemens, 2012 Yaroslav Halchenko"
__license__ = "GPL"
import Queue, logging
from .actions import Actions
# Gets the instance of the logger.
logSys = logging.getLogger(__name__)
class Jail:
"""Fail2Ban jail, which manages a filter and associated actions.
	The class handles the initialisation of a filter, and actions. Its
role is then to act as an interface between the filter and actions,
passing bans detected by the filter, for the actions to then act upon.
Parameters
----------
name : str
Name assigned to the jail.
backend : str
Backend to be used for filter. "auto" will attempt to pick
the most preferred backend method. Default: "auto"
db : Fail2BanDb
Fail2Ban persistent database instance. Default: `None`
Attributes
----------
name
database
filter
actions
idle
status
"""
#Known backends. Each backend should have corresponding __initBackend method
# yoh: stored in a list instead of a tuple since only
# list had .index until 2.6
_BACKENDS = ['pyinotify', 'gamin', 'polling', 'systemd']
def __init__(self, name, backend = "auto", db=None):
self.__db = db
# 26 based on iptable chain name limit of 30 less len('f2b-')
if len(name) >= 26:
logSys.warning("Jail name %r might be too long and some commands "
"might not function correctly. Please shorten"
% name)
self.__name = name
self.__queue = Queue.Queue()
self.__filter = None
logSys.info("Creating new jail '%s'" % self.name)
self._setBackend(backend)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.name)
def _setBackend(self, backend):
backend = backend.lower() # to assure consistent matching
backends = self._BACKENDS
if backend != 'auto':
# we have got strict specification of the backend to use
if not (backend in self._BACKENDS):
logSys.error("Unknown backend %s. Must be among %s or 'auto'"
% (backend, backends))
raise ValueError("Unknown backend %s. Must be among %s or 'auto'"
% (backend, backends))
# so explore starting from it till the 'end'
backends = backends[backends.index(backend):]
for b in backends:
initmethod = getattr(self, '_init%s' % b.capitalize())
try:
initmethod()
if backend != 'auto' and b != backend:
logSys.warning("Could only initiated %r backend whenever "
"%r was requested" % (b, backend))
else:
logSys.info("Initiated %r backend" % b)
self.__actions = Actions(self)
return # we are done
except ImportError, e:
# Log debug if auto, but error if specific
logSys.log(
logging.DEBUG if backend == "auto" else logging.ERROR,
"Backend %r failed to initialize due to %s" % (b, e))
# log error since runtime error message isn't printed, INVALID COMMAND
logSys.error(
"Failed to initialize any backend for Jail %r" % self.name)
raise RuntimeError(
"Failed to initialize any backend for Jail %r" % self.name)
def _initPolling(self):
from filterpoll import FilterPoll
logSys.info("Jail '%s' uses poller" % self.name)
self.__filter = FilterPoll(self)
def _initGamin(self):
# Try to import gamin
from filtergamin import FilterGamin
logSys.info("Jail '%s' uses Gamin" % self.name)
self.__filter = FilterGamin(self)
def _initPyinotify(self):
# Try to import pyinotify
from filterpyinotify import FilterPyinotify
logSys.info("Jail '%s' uses pyinotify" % self.name)
self.__filter = FilterPyinotify(self)
def _initSystemd(self): # pragma: systemd no cover
# Try to import systemd
from filtersystemd import FilterSystemd
logSys.info("Jail '%s' uses systemd" % self.name)
self.__filter = FilterSystemd(self)
@property
def name(self):
"""Name of jail.
"""
return self.__name
@property
def database(self):
"""The database used to store persistent data for the jail.
"""
return self.__db
@property
def filter(self):
"""The filter which the jail is using to monitor log files.
"""
return self.__filter
@property
def actions(self):
"""Actions object used to manage actions for jail.
"""
return self.__actions
@property
def idle(self):
"""A boolean indicating whether jail is idle.
"""
return self.filter.idle or self.actions.idle
@idle.setter
def idle(self, value):
self.filter.idle = value
self.actions.idle = value
@property
def status(self):
"""The status of the jail.
"""
return [
("Filter", self.filter.status),
("Actions", self.actions.status),
]
def putFailTicket(self, ticket):
"""Add a fail ticket to the jail.
Used by filter to add a failure for banning.
"""
self.__queue.put(ticket)
if self.database is not None:
self.database.addBan(self, ticket)
def getFailTicket(self):
"""Get a fail ticket from the jail.
Used by actions to get a failure for banning.
"""
try:
return self.__queue.get(False)
except Queue.Empty:
return False
def start(self):
"""Start the jail, by starting filter and actions threads.
		Once started, also queries the persistent database to reinstate
any valid bans.
"""
self.filter.start()
self.actions.start()
# Restore any previous valid bans from the database
if self.database is not None:
for ticket in self.database.getBansMerged(
jail=self, bantime=self.actions.getBanTime()):
if not self.filter.inIgnoreIPList(ticket.getIP()):
self.__queue.put(ticket)
logSys.info("Jail '%s' started" % self.name)
def stop(self):
"""Stop the jail, by stopping filter and actions threads.
"""
self.filter.stop()
self.actions.stop()
self.filter.join()
self.actions.join()
logSys.info("Jail '%s' stopped" % self.name)
def is_alive(self):
"""Check jail "is_alive" by checking filter and actions threads.
"""
return self.filter.is_alive() or self.actions.is_alive()
| gpl-2.0 | 600,484,904,292,077,600 | 28.262931 | 98 | 0.689645 | false |
cprov/snapcraft | snapcraft/internal/elf.py | 2 | 26365 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import contextlib
import glob
import logging
import os
import re
import shutil
import subprocess
import tempfile
from typing import Dict, FrozenSet, List, Set, Sequence, Tuple, Union # noqa
import elftools.elf.elffile
from pkg_resources import parse_version
from snapcraft import file_utils
from snapcraft.internal import common, errors, os_release, repo
logger = logging.getLogger(__name__)
class NeededLibrary:
"""Represents an ELF library version."""
def __init__(self, *, name: str) -> None:
self.name = name
self.versions = set() # type: Set[str]
def add_version(self, version: str) -> None:
self.versions.add(version)
ElfArchitectureTuple = Tuple[str, str, str]
ElfDataTuple = Tuple[
ElfArchitectureTuple, str, str, Dict[str, NeededLibrary], bool
] # noqa: E501
SonameCacheDict = Dict[Tuple[ElfArchitectureTuple, str], str]
# Old pyelftools uses byte strings for section names. Some data is
# also returned as bytes, which is handled below.
if parse_version(elftools.__version__) >= parse_version("0.24"):
_DYNAMIC = ".dynamic" # type: Union[str, bytes]
_GNU_VERSION_R = ".gnu.version_r" # type: Union[str, bytes]
_INTERP = ".interp" # type: Union[str, bytes]
else:
_DYNAMIC = b".dynamic"
_GNU_VERSION_R = b".gnu.version_r"
_INTERP = b".interp"
class SonameCache:
"""A cache for sonames."""
def __getitem__(self, key):
return self._soname_paths[key]
def __setitem__(self, key, item):
# Initial API error checks
if not isinstance(key, tuple):
raise EnvironmentError(
"The key for SonameCache has to be a (arch, soname) tuple."
)
if not isinstance(key[0], tuple) or len(key[0]) != 3:
raise EnvironmentError(
"The first element of the key needs to of type ElfArchitectureTuple."
)
if not isinstance(key[1], str):
raise EnvironmentError(
"The second element of the key needs to be "
"of type str representing the soname."
)
self._soname_paths[key] = item
def __contains__(self, key):
return key in self._soname_paths
def __init__(self):
"""Initialize a cache for sonames"""
self._soname_paths = dict() # type: SonameCacheDict
def reset_except_root(self, root):
"""Reset the cache values that aren't contained within root."""
new_soname_paths = dict() # type: SonameCacheDict
for key, value in self._soname_paths.items():
if value is not None and value.startswith(root):
new_soname_paths[key] = value
self._soname_paths = new_soname_paths
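# --- Illustrative sketch (added; not part of the original API) ---
# The cache is keyed on ((EI_CLASS, EI_DATA, e_machine), soname) so the same
# soname can resolve to different files for different architectures.
def _soname_cache_example():
    cache = SonameCache()
    amd64 = ("ELFCLASS64", "ELFDATA2LSB", "EM_X86_64")
    cache[amd64, "libfoo.so.1"] = "/root/prime/usr/lib/libfoo.so.1"
    assert (amd64, "libfoo.so.1") in cache
    # Entries whose path lives outside the new root are dropped:
    cache.reset_except_root("/some/other/root")
    assert (amd64, "libfoo.so.1") not in cache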
class Library:
"""Represents the SONAME and path to the library."""
def __init__(
self,
*,
soname: str,
path: str,
root_path: str,
core_base_path: str,
arch: ElfArchitectureTuple,
soname_cache: SonameCache
) -> None:
self.soname = soname
# We need to always look for the soname inside root first,
# and after exhausting all options look in core_base_path.
if path.startswith(root_path):
self.path = path
else:
self.path = _crawl_for_path(
soname=soname,
root_path=root_path,
core_base_path=core_base_path,
arch=arch,
soname_cache=soname_cache,
)
if not self.path and path.startswith(core_base_path):
self.path = path
# Required for libraries on the host and the fetching mechanism
if not self.path:
self.path = path
system_libs = _get_system_libs()
if soname in system_libs:
self.system_lib = True
else:
self.system_lib = False
# self.path has the correct resulting path.
if self.path.startswith(core_base_path):
self.in_base_snap = True
else:
self.in_base_snap = False
def _crawl_for_path(
*,
soname: str,
root_path: str,
core_base_path: str,
arch: ElfArchitectureTuple,
soname_cache: SonameCache
) -> str:
# Speed things up and return what was already found once.
if (arch, soname) in soname_cache:
return soname_cache[arch, soname]
logger.debug("Crawling to find soname {!r}".format(soname))
for path in (root_path, core_base_path):
if not os.path.exists(path):
continue
for root, directories, files in os.walk(path):
for file_name in files:
if file_name == soname:
file_path = os.path.join(root, file_name)
if ElfFile.is_elf(file_path):
# We found a match by name, anyway. Let's verify that
# the architecture is the one we want.
elf_file = ElfFile(path=file_path)
if elf_file.arch == arch:
soname_cache[arch, soname] = file_path
return file_path
# If not found we cache it too
soname_cache[arch, soname] = None
return None
# Old versions of pyelftools return bytes rather than strings for
# certain APIs. So we pass those values through this function to get
# a consistent result.
def _ensure_str(s):
if isinstance(s, bytes):
return s.decode("ascii")
assert isinstance(s, str)
return s
class ElfFile:
"""ElfFile represents and elf file on a path and its attributes."""
@classmethod
def is_elf(cls, path: str) -> bool:
if not os.path.isfile(path):
# ELF binaries are regular files
return False
with open(path, "rb") as bin_file:
return bin_file.read(4) == b"\x7fELF"
def __init__(self, *, path: str) -> None:
"""Initialize an ElfFile instance.
:param str path: path to an elf_file within a snapcraft project.
"""
self.path = path
self.dependencies = set() # type: Set[Library]
elf_data = self._extract(path)
self.arch = elf_data[0]
self.interp = elf_data[1]
self.soname = elf_data[2]
self.needed = elf_data[3]
self.execstack_set = elf_data[4]
def _extract(self, path: str) -> ElfDataTuple: # noqa: C901
arch = None # type: ElfArchitectureTuple
interp = str()
soname = str()
libs = dict()
execstack_set = False
with open(path, "rb") as fp:
elf = elftools.elf.elffile.ELFFile(fp)
# A set of fields to identify the architecture of the ELF file:
# EI_CLASS: 32/64 bit (e.g. amd64 vs. x32)
            # EI_DATA: byte order (e.g. ppc64 vs. ppc64le)
# e_machine: instruction set (e.g. x86-64 vs. arm64)
#
# For amd64 binaries, this will evaluate to:
# ('ELFCLASS64', 'ELFDATA2LSB', 'EM_X86_64')
arch = (
elf.header.e_ident.EI_CLASS,
elf.header.e_ident.EI_DATA,
elf.header.e_machine,
)
# If we are processing a detached debug info file, these
# sections will be present but empty.
interp_section = elf.get_section_by_name(_INTERP)
if (
interp_section is not None
and interp_section.header.sh_type != "SHT_NOBITS"
):
interp = interp_section.data().rstrip(b"\x00").decode("ascii")
dynamic_section = elf.get_section_by_name(_DYNAMIC)
if (
dynamic_section is not None
and dynamic_section.header.sh_type != "SHT_NOBITS"
):
for tag in dynamic_section.iter_tags("DT_NEEDED"):
needed = _ensure_str(tag.needed)
libs[needed] = NeededLibrary(name=needed)
for tag in dynamic_section.iter_tags("DT_SONAME"):
soname = _ensure_str(tag.soname)
verneed_section = elf.get_section_by_name(_GNU_VERSION_R)
if (
verneed_section is not None
and verneed_section.header.sh_type != "SHT_NOBITS"
):
for library, versions in verneed_section.iter_versions():
library_name = _ensure_str(library.name)
# If the ELF file only references weak symbols
# from a library, it may be absent from DT_NEEDED
# but still have an entry in .gnu.version_r for
# symbol versions.
if library_name not in libs:
continue
lib = libs[library_name]
for version in versions:
lib.add_version(_ensure_str(version.name))
for segment in elf.iter_segments():
if segment["p_type"] == "PT_GNU_STACK":
# p_flags holds the bit mask for this segment.
# See `man 5 elf`.
mode = segment["p_flags"]
if mode & elftools.elf.constants.P_FLAGS.PF_X:
execstack_set = True
return arch, interp, soname, libs, execstack_set
def is_linker_compatible(self, *, linker_version: str) -> bool:
"""Determines if linker will work given the required glibc version."""
version_required = self.get_required_glibc()
r = parse_version(version_required) <= parse_version(linker_version)
logger.debug(
"Checking if linker {!r} will work with "
"GLIBC_{} required by {!r}: {!r}".format(
linker_version, version_required, self.path, r
)
)
return r
def get_required_glibc(self) -> str:
"""Returns the required glibc version for this ELF file."""
with contextlib.suppress(AttributeError):
return self._required_glibc # type: ignore
version_required = ""
for lib in self.needed.values():
for version in lib.versions:
if not version.startswith("GLIBC_"):
continue
version = version[6:]
if parse_version(version) > parse_version(version_required):
version_required = version
self._required_glibc = version_required
return version_required
def load_dependencies(
self, root_path: str, core_base_path: str, soname_cache: SonameCache = None
) -> Set[str]:
"""Load the set of libraries that are needed to satisfy elf's runtime.
This may include libraries contained within the project.
The object's .dependencies attribute is set after loading.
:param str root_path: the root path to search for missing dependencies.
:param str core_base_path: the core base path to search for missing
dependencies.
:param SonameCache soname_cache: a cache of previously search
dependencies.
:returns: a set of string with paths to the library dependencies of
elf.
"""
if soname_cache is None:
soname_cache = SonameCache()
logger.debug("Getting dependencies for {!r}".format(self.path))
ldd_out = [] # type: List[str]
try:
# ldd output sample:
# /lib64/ld-linux-x86-64.so.2 (0x00007fb3c5298000)
# libm.so.6 => /lib/x86_64-linux-gnu/libm.so.6 (0x00007fb3bef03000)
ldd_out = common.run_output(["ldd", self.path]).split("\n")
except subprocess.CalledProcessError:
logger.warning(
"Unable to determine library dependencies for {!r}".format(self.path)
)
return set()
ldd_out_split = [l.split() for l in ldd_out]
libs = set()
for ldd_line in ldd_out_split:
if len(ldd_line) > 2:
libs.add(
Library(
soname=ldd_line[0],
path=ldd_line[2],
root_path=root_path,
core_base_path=core_base_path,
arch=self.arch,
soname_cache=soname_cache,
)
)
self.dependencies = libs
# Return a set useful only for fetching libraries from the host
library_paths = set() # type: Set[str]
for l in libs:
if os.path.exists(l.path) and not l.in_base_snap and not l.system_lib:
library_paths.add(l.path)
return library_paths
class Patcher:
"""Patcher holds the necessary logic to patch elf files."""
def __init__(
self, *, dynamic_linker: str, root_path: str, preferred_patchelf_path=None
) -> None:
"""Create a Patcher instance.
:param str dynamic_linker: the path to the dynamic linker to set the
elf file to.
:param str root_path: the base path for the snap to determine
if use of $ORIGIN is possible.
:param str preferred_patchelf_path: patch the necessary elf_files with
this patchelf.
"""
self._dynamic_linker = dynamic_linker
self._root_path = root_path
if preferred_patchelf_path:
self._patchelf_cmd = preferred_patchelf_path
else:
self._patchelf_cmd = file_utils.get_tool_path("patchelf")
self._strip_cmd = file_utils.get_tool_path("strip")
def patch(self, *, elf_file: ElfFile) -> None:
"""Patch elf_file with the Patcher instance configuration.
If the ELF is executable, patch it to use the configured linker.
If the ELF has dependencies (DT_NEEDED), set an rpath to them.
:param ElfFile elf: a data object representing an elf file and its
relevant attributes.
:raises snapcraft.internal.errors.PatcherError:
raised when the elf_file cannot be patched.
"""
patchelf_args = []
if elf_file.interp:
patchelf_args.extend(["--set-interpreter", self._dynamic_linker])
if elf_file.dependencies:
rpath = self._get_rpath(elf_file)
# Due to https://github.com/NixOS/patchelf/issues/94 we need
# to first clear the current rpath
self._run_patchelf(
patchelf_args=["--remove-rpath"], elf_file_path=elf_file.path
)
# Parameters:
# --force-rpath: use RPATH instead of RUNPATH.
# --shrink-rpath: will remove unneeded entries, with the
# side effect of preferring host libraries
# so we simply do not use it.
# --set-rpath: set the RPATH to the colon separated argument.
patchelf_args.extend(["--force-rpath", "--set-rpath", rpath])
# no patchelf_args means there is nothing to do.
if not patchelf_args:
return
self._run_patchelf(patchelf_args=patchelf_args, elf_file_path=elf_file.path)
def _run_patchelf(self, *, patchelf_args: List[str], elf_file_path: str) -> None:
try:
return self._do_run_patchelf(
patchelf_args=patchelf_args, elf_file_path=elf_file_path
)
except errors.PatcherError as patch_error:
# This is needed for patchelf to properly work with
# go binaries (LP: #1736861).
# We do this here instead of the go plugin for two reasons, the
# first being that we do not want to blindly remove the section,
# only doing it when necessary, and the second, this logic
# should eventually be removed once patchelf catches up.
try:
logger.warning(
"Failed to update {!r}. Retrying after stripping "
"the .note.go.buildid from the elf file.".format(elf_file_path)
)
subprocess.check_call(
[
self._strip_cmd,
"--remove-section",
".note.go.buildid",
elf_file_path,
]
)
except subprocess.CalledProcessError:
logger.warning(
"Could not properly strip .note.go.buildid "
"from {!r}.".format(elf_file_path)
)
raise patch_error
return self._do_run_patchelf(
patchelf_args=patchelf_args, elf_file_path=elf_file_path
)
def _do_run_patchelf(self, *, patchelf_args: List[str], elf_file_path: str) -> None:
# Run patchelf on a copy of the primed file and replace it
# after it is successful. This allows us to break the potential
# hard link created when migrating the file across the steps of
# the part.
with tempfile.NamedTemporaryFile() as temp_file:
shutil.copy2(elf_file_path, temp_file.name)
cmd = [self._patchelf_cmd] + patchelf_args + [temp_file.name]
try:
subprocess.check_call(cmd)
# There is no need to catch FileNotFoundError as patchelf should be
# bundled with snapcraft which means its lack of existence is a
# "packager" error.
except subprocess.CalledProcessError as call_error:
patchelf_version = (
subprocess.check_output([self._patchelf_cmd, "--version"])
.decode()
.strip()
)
# 0.10 is the version where patching certain binaries will
# work (currently known affected packages are mostly built
# with go).
if parse_version(patchelf_version) < parse_version("0.10"):
raise errors.PatcherNewerPatchelfError(
elf_file=elf_file_path,
process_exception=call_error,
patchelf_version=patchelf_version,
)
else:
raise errors.PatcherGenericError(
elf_file=elf_file_path, process_exception=call_error
)
# We unlink to break the potential hard link
os.unlink(elf_file_path)
shutil.copy2(temp_file.name, elf_file_path)
def _get_existing_rpath(self, elf_file_path):
output = subprocess.check_output(
[self._patchelf_cmd, "--print-rpath", elf_file_path]
)
return output.decode().strip().split(":")
def _get_rpath(self, elf_file) -> str:
origin_rpaths = list() # type: List[str]
base_rpaths = set() # type: Set[str]
existing_rpaths = self._get_existing_rpath(elf_file.path)
for dependency in elf_file.dependencies:
if dependency.path:
if dependency.in_base_snap:
base_rpaths.add(os.path.dirname(dependency.path))
elif dependency.path.startswith(self._root_path):
rel_library_path = os.path.relpath(dependency.path, elf_file.path)
rel_library_path_dir = os.path.dirname(rel_library_path)
# return the dirname, with the first .. replace
# with $ORIGIN
origin_rpath = rel_library_path_dir.replace("..", "$ORIGIN", 1)
if origin_rpath not in origin_rpaths:
origin_rpaths.append(origin_rpath)
if existing_rpaths:
# Only keep those that mention origin and are not already in our
# bundle.
existing_rpaths = [
r for r in existing_rpaths if "$ORIGIN" in r and r not in origin_rpaths
]
origin_rpaths = existing_rpaths + origin_rpaths
origin_paths = ":".join((r for r in origin_rpaths if r))
core_base_rpaths = ":".join(base_rpaths)
if origin_paths and core_base_rpaths:
return "{}:{}".format(origin_paths, core_base_rpaths)
elif origin_paths and not core_base_rpaths:
return origin_paths
else:
return core_base_rpaths
def determine_ld_library_path(root: str) -> List[str]:
"""Determine additional library paths needed for the linker loader.
This is a workaround until full library searching is implemented which
works by searching for ld.so.conf in specific hard coded locations
within root.
:param root str: the root directory to search for specific ld.so.conf
entries.
:returns: a list of strings of library paths where relevant libraries
can be found within root.
"""
# If more ld.so.conf files need to be supported, add them here.
ld_config_globs = {"{}/usr/lib/*/mesa*/ld.so.conf".format(root)}
ld_library_paths = []
for this_glob in ld_config_globs:
for ld_conf_file in glob.glob(this_glob):
ld_library_paths.extend(_extract_ld_library_paths(ld_conf_file))
return [root + path for path in ld_library_paths]
def _extract_ld_library_paths(ld_conf_file: str) -> List[str]:
# From the ldconfig manpage, paths can be colon-, space-, tab-, newline-,
# or comma-separated.
path_delimiters = re.compile(r"[:\s,]")
comments = re.compile(r"#.*$")
paths = []
with open(ld_conf_file, "r") as f:
for line in f:
# Remove comments from line
line = comments.sub("", line).strip()
if line:
paths.extend(path_delimiters.split(line))
return paths
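# --- Illustrative sketch (added; not part of the original API) ---
# ld.so.conf fragments may mix comments with colon/space/comma separated
# paths; the parser flattens them into a plain list.
def _extract_ld_library_paths_example():
    with tempfile.NamedTemporaryFile(mode="w", suffix=".conf", delete=False) as f:
        f.write("# GL config\n/usr/lib/x86_64-linux-gnu/mesa:/usr/lib/mesa\n/opt/lib\n")
        conf_path = f.name
    try:
        # -> ['/usr/lib/x86_64-linux-gnu/mesa', '/usr/lib/mesa', '/opt/lib']
        return _extract_ld_library_paths(conf_path)
    finally:
        os.unlink(conf_path)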
_libraries = None
def _get_system_libs() -> FrozenSet[str]:
global _libraries
if _libraries: # type: ignore
return _libraries # type: ignore
lib_path = None
release = os_release.OsRelease()
with contextlib.suppress(errors.OsReleaseVersionIdError):
lib_path = os.path.join(common.get_librariesdir(), release.version_id())
if not lib_path or not os.path.exists(lib_path):
logger.debug("Only excluding libc libraries from the release")
libc6_libs = [
os.path.basename(l) for l in repo.Repo.get_package_libraries("libc6")
]
_libraries = frozenset(libc6_libs)
else:
with open(lib_path) as fn:
_libraries = frozenset(fn.read().split())
return _libraries
def get_elf_files(root: str, file_list: Sequence[str]) -> FrozenSet[ElfFile]:
"""Return a frozenset of elf files from file_list prepended with root.
:param str root: the root directory from where the file_list is generated.
:param file_list: a list of file in root.
:returns: a frozentset of ElfFile objects.
"""
elf_files = set() # type: Set[ElfFile]
for part_file in file_list:
# Filter out object (*.o) files-- we only care about binaries.
if part_file.endswith(".o"):
continue
# No need to crawl links-- the original should be here, too.
path = os.path.join(root, part_file) # type: str
if os.path.islink(path):
logger.debug("Skipped link {!r} while finding dependencies".format(path))
continue
# Finally, make sure this is actually an ELF file
if ElfFile.is_elf(path):
elf_file = ElfFile(path=path)
# if we have dyn symbols we are dynamic
if elf_file.needed:
elf_files.add(elf_file)
return frozenset(elf_files)
def _get_dynamic_linker(library_list: List[str]) -> str:
"""Return the dynamic linker from library_list."""
regex = re.compile(r"(?P<dynamic_linker>ld-[\d.]+.so)$")
for library in library_list:
m = regex.search(os.path.basename(library))
if m:
return library
raise RuntimeError(
"The format for the linker should be of the form "
"<root>/ld-<X>.<Y>.so. There are no matches for the "
"current libc6 package"
)
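# --- Illustrative sketch (added; not part of the original API) ---
# The dynamic linker is picked out of the libc6 file list purely by its
# ld-<X>.<Y>.so basename; every other library in the list is ignored.
def _get_dynamic_linker_example():
    libs = [
        "/root/lib/x86_64-linux-gnu/libc-2.27.so",
        "/root/lib/x86_64-linux-gnu/ld-2.27.so",
    ]
    return _get_dynamic_linker(libs)  # -> ".../ld-2.27.so"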
def find_linker(*, root_path: str, snap_base_path: str) -> str:
"""Find and return the dynamic linker that would be seen at runtime.
:param str root_path: the root path of a snap tree.
:param str snap_base_path: absolute path to the snap once installed to
setup proper rpaths.
:returns: the path to the dynamic linker to use
"""
# We assume the current system will satisfy the GLIBC requirement,
# get the current libc6 libraries (which includes the linker)
libc6_libraries_list = repo.Repo.get_package_libraries("libc6")
# For security reasons, we do not want to automatically pull in
# libraries but expect them to be consciously brought in by stage-packages
# instead.
libc6_libraries_paths = [
os.path.join(root_path, l[1:]) for l in libc6_libraries_list
]
dynamic_linker = _get_dynamic_linker(libc6_libraries_paths)
# Get the path to the "would be" dynamic linker when this snap is
# installed. Strip the root_path from the retrieved dynamic_linker
# variables + the leading `/` so that os.path.join can perform the
# proper join with snap_base_path.
dynamic_linker_path = os.path.join(
snap_base_path, dynamic_linker[len(root_path) + 1 :]
)
return dynamic_linker_path
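# --- End-to-end sketch (added; not part of the original module) ---
# Typical flow when patching a primed snap tree: collect the ELF files, work
# out the in-snap dynamic linker, then patch interpreter and rpath.  The
# directories below are placeholders, and resolving libc6 assumes a host
# where the deb-based 'repo' backend is available.
def _patch_primed_elves_example(prime_dir="/tmp/prime",
                                snap_base_path="/snap/mysnap/current"):
    file_list = []
    for walk_root, _, files in os.walk(prime_dir):
        file_list.extend(
            os.path.relpath(os.path.join(walk_root, f), prime_dir) for f in files)
    elf_files = get_elf_files(prime_dir, file_list)
    linker = find_linker(root_path=prime_dir, snap_base_path=snap_base_path)
    patcher = Patcher(dynamic_linker=linker, root_path=prime_dir)
    for elf_file in elf_files:
        patcher.patch(elf_file=elf_file)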
| gpl-3.0 | 5,716,842,983,401,830,000 | 36.935252 | 88 | 0.573943 | false |
smoitra87/gerbil | deepnet/impute.py | 1 | 24315 | """Computes partition function for RBM-like models using Annealed Importance Sampling."""
import numpy as np
from deepnet import dbm
from deepnet import util
from deepnet import trainer as tr
from choose_matrix_library import *
import sys
import numpy as np
import pdb
import time
import itertools
import matplotlib.pyplot as plt
from deepnet import visualize
import deepnet
import scipy.io as sio
def LogMeanExp(x):
offset = x.max()
return offset + np.log(np.exp(x-offset).mean())
def LogSumExp(x):
offset = x.max()
return offset + np.log(np.exp(x-offset).sum())
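# --- Illustrative check (added; not part of the original module) ---
# Both helpers subtract the max before exponentiating, which keeps the sum
# finite even when the log-values are far below the float underflow limit.
def _log_sum_exp_example():
  x = np.array([-1000.0, -1001.0, -1002.0])
  naive = np.log(np.exp(x).sum())  # underflows to -inf
  stable = LogSumExp(x)            # ~ -999.59
  return naive, stable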
def Display(w, hid_state, input_state, w_var=None, x_axis=None):
w = w.asarray().flatten()
plt.figure(1)
plt.clf()
plt.hist(w, 100)
visualize.display_hidden(hid_state.asarray(), 2, 'activations', prob=True)
# plt.figure(3)
# plt.clf()
# plt.imshow(hid_state.asarray().T, cmap=plt.cm.gray, interpolation='nearest')
# plt.figure(4)
# plt.clf()
# plt.imshow(input_state.asarray().T, cmap=plt.cm.gray, interpolation='nearest')
#, state.shape[0], state.shape[1], state.shape[0], 3, title='Markov chains')
# plt.tight_layout(pad=0, w_pad=0, h_pad=0)
# plt.figure(5)
# plt.clf()
# plt.suptitle('Variance')
# plt.plot(np.array(x_axis), np.array(w_var))
# plt.draw()
def impute_dbm_ais(model):
"""Run approximate pll using AIS on a DBM """
def impute_rbm_gaussian_exact(model):
""" run exact exact pll and imputation error on an rbm """
batchsize = model.batchsize
input_layer = model.GetLayerByName('input_layer')
hidden_layer = model.GetLayerByName('bernoulli_hidden1')
bern2_hidden_layer = model.GetLayerByName('bernoulli2_hidden1')
gaussian_layer = model.GetLayerByName('gaussian_hidden1')
# Get input layer features
dimensions = input_layer.dimensions
numlabels = input_layer.numlabels
data = input_layer.data
# set up temp data structures
for layer in model.layer:
layer.foo = layer.statesize
layer.bar = layer.deriv
zeroslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
onesrow = cm.CUDAMatrix(np.ones([1,\
batchsize]))
batchslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
batchzeroslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
batchslice2 = cm.CUDAMatrix(np.zeros([1, batchsize]))
datasize_squared = cm.CUDAMatrix(np.zeros([batchsize, batchsize]))
datasize_eye = cm.CUDAMatrix(np.eye(batchsize))
datasize_eye2 = cm.CUDAMatrix(np.eye(batchsize))
if hidden_layer:
hidden_bias = hidden_layer.params['bias']
bedge = next(e for e in model.edge if e.node1.name == 'input_layer' \
and e.node2.name == 'bernoulli_hidden1')
w = bedge.params['weight']
if bern2_hidden_layer:
bern2_hidden_bias = bern2_hidden_layer.params['bias']
bedge2 = next(e for e in model.edge if e.node1.name == 'input_layer' \
and e.node2.name == 'bernoulli2_hidden1')
w2 = bedge2.params['weight']
if 'bias' in input_layer.params:
input_bias = input_layer.params['bias']
if gaussian_layer:
gedge = next(e for e in model.edge if e.node1.name == 'input_layer' \
and e.node2.name == 'gaussian_hidden1')
gw = gedge.params['weight']
input_diag = input_layer.params['diag']
diag_val = input_diag.sum() / (input_layer.dimensions * input_layer.numlabels)
# RUN Imputation Error
for dim_idx in range(dimensions):
#-------------------------------------------
# Set state of input variables
input_layer.GetData()
dim_offset = dim_idx * numlabels
for label_idx in range(numlabels):
batchslice.assign(batchzeroslice)
#Assign state value
label_offset = dim_idx * numlabels + label_idx
input_layer.state.set_row_slice(dim_offset, dim_offset + numlabels, \
zeroslice)
input_layer.state.set_row_slice(label_offset, label_offset+1, onesrow)
if hidden_layer:
# Add the contributions from bernoulli hidden layer
cm.dot(w.T, input_layer.state, target=hidden_layer.state)
hidden_layer.state.add_col_vec(hidden_bias)
cm.log_1_plus_exp(hidden_layer.state)
hidden_layer.state.sum(axis=0, target=batchslice)
if bern2_hidden_layer:
# Add the contributions from bernoulli hidden layer
cm.dot(w2.T, input_layer.state, target=bern2_hidden_layer.state)
bern2_hidden_layer.state.add_col_vec(bern2_hidden_bias)
cm.log_1_plus_exp(bern2_hidden_layer.state)
batchslice.add_sums(bern2_hidden_layer.state, axis=0)
if 'bias' in input_layer.params:
cm.dot(input_bias.T, input_layer.state, target=batchslice2)
batchslice.add_row_vec(batchslice2)
if gaussian_layer:
# Add contributions from gaussian hidden layer
cm.dot(gw.T, input_layer.state, target=gaussian_layer.state)
cm.dot(gaussian_layer.state.T, gaussian_layer.state, target= datasize_squared)
datasize_squared.mult(datasize_eye, target=datasize_eye2)
datasize_eye2.sum(axis=0, target=batchslice2)
# Add constants from gaussian hidden layer
integration_constant = gaussian_layer.dimensions * np.log(2*np.pi)
integration_constant += input_layer.dimensions * diag_val
batchslice2.add(integration_constant)
batchslice2.mult(0.5)
batchslice.add_row_vec(batchslice2)
input_layer.foo.set_row_slice(label_offset, label_offset+1, batchslice)
# Apply softmax on log Z_v as energies
input_layer.foo.reshape((numlabels, dimensions * batchsize))
input_layer.foo.apply_softmax()
data.reshape((1, dimensions * batchsize))
# Calculate Imputation Error
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
input_layer.foo.get_softmax_correct(data, target=input_layer.batchsize_temp)
input_layer.batchsize_temp.reshape((dimensions, batchsize))
imperr_cpu = (dimensions - input_layer.batchsize_temp.sum(axis=0).asarray() )/ (0. + dimensions)
# Calculate Pseudo ll
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
input_layer.foo.get_softmax_cross_entropy(data, target=input_layer.batchsize_temp, \
tiny=input_layer.tiny)
input_layer.batchsize_temp.reshape((dimensions, batchsize))
pll_cpu = - input_layer.batchsize_temp.sum(axis=0).asarray()
  # Undo reshapes
input_layer.foo.reshape((numlabels * dimensions, batchsize))
data.reshape((dimensions, batchsize))
zeroslice.free_device_memory()
onesrow.free_device_memory()
batchslice.free_device_memory()
return pll_cpu, imperr_cpu
def impute_rbm_exact(model):
""" run exact exact pll and imputation error on an rbm """
batchsize = model.batchsize
input_layer = model.GetLayerByName('input_layer')
hidden_layer = model.GetLayerByName('hidden1')
# Get input layer features
dimensions = input_layer.dimensions
numlabels = input_layer.numlabels
data = input_layer.data
# set up temp data structures
for layer in model.layer:
layer.foo = layer.statesize
layer.bar = layer.deriv
zeroslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
onesrow = cm.CUDAMatrix(np.ones([1,\
batchsize]))
batchslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
batchslice2 = cm.CUDAMatrix(np.zeros([1, batchsize]))
hidden_bias = hidden_layer.params['bias']
input_bias = input_layer.params['bias']
edge = model.edge[0]
w = edge.params['weight']
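  # For every input dimension d and candidate label k, the loop below clamps
  # dimension d to label k (all other dimensions keep their data values) and
  # accumulates the unnormalized log-probability
  #   log Z_v(d, k) = input_bias . v + sum_j log(1 + exp(w_j . v + hidden_bias_j))
  # A softmax over k then yields p(v_d = k | v_{-d}), from which the
  # pseudo-log-likelihood and the imputation error are computed.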
# RUN Imputation Error
for dim_idx in range(dimensions):
#-------------------------------------------
# Set state of input variables
input_layer.GetData()
dim_offset = dim_idx * numlabels
for label_idx in range(numlabels):
#Assign state value
label_offset = dim_idx * numlabels + label_idx
input_layer.state.set_row_slice(dim_offset, dim_offset + numlabels, \
zeroslice)
input_layer.state.set_row_slice(label_offset, label_offset+1, onesrow)
cm.dot(w.T, input_layer.state, target=hidden_layer.state)
hidden_layer.state.add_col_vec(hidden_bias)
cm.log_1_plus_exp(hidden_layer.state)
hidden_layer.state.sum(axis=0, target=batchslice)
cm.dot(input_bias.T, input_layer.state, target=batchslice2)
batchslice.add_row_vec(batchslice2)
input_layer.foo.set_row_slice(label_offset, label_offset+1, batchslice)
# Apply softmax on log Z_v as energies
input_layer.foo.reshape((numlabels, dimensions * batchsize))
input_layer.foo.apply_softmax()
data.reshape((1, dimensions * batchsize))
# Calculate Imputation Error
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
input_layer.foo.get_softmax_correct(data, target=input_layer.batchsize_temp)
input_layer.batchsize_temp.reshape((dimensions, batchsize))
imperr_cpu = (dimensions - input_layer.batchsize_temp.sum(axis=0).asarray() )/ (0. + dimensions)
# Calculate Pseudo ll
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
input_layer.foo.get_softmax_cross_entropy(data, target=input_layer.batchsize_temp, \
tiny=input_layer.tiny)
input_layer.batchsize_temp.reshape((dimensions, batchsize))
pll_cpu = - input_layer.batchsize_temp.sum(axis=0).asarray()
  # Undo reshapes
input_layer.foo.reshape((numlabels * dimensions, batchsize))
data.reshape((dimensions, batchsize))
zeroslice.free_device_memory()
onesrow.free_device_memory()
batchslice.free_device_memory()
return pll_cpu, imperr_cpu
def impute_mf(model, mf_steps, hidden_mf_steps, **opts):
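  """Impute each input dimension with mean-field and score the result.

  Each softmax input dimension is blanked out in turn (initialized to a
  uniform distribution), the layers are relaxed with mf_steps x hidden_mf_steps
  mean-field sweeps, and the reconstruction of the blanked dimension is scored
  against the data: cross-entropy for the pseudo-log-likelihood and either
  exact-match or BLOSUM90 accuracy for the imputation error.
  """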
# Initialize stuff
batchsize = model.batchsize
input_layer = model.GetLayerByName('input_layer')
hidden_layers = []
for layer in model.layer:
if not layer.is_input:
hidden_layers.append(layer)
dimensions = input_layer.dimensions
numlabels = input_layer.numlabels
data = input_layer.data
# set up temp data structures
for layer in model.layer:
layer.foo = layer.statesize
input_layer.fooslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
input_layer.barslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
pll = cm.CUDAMatrix(np.zeros([1, batchsize]))
imputation_err = cm.CUDAMatrix(np.zeros([1, batchsize]))
input_layer.biasslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
input_layer.biasslice.apply_softmax()
# INITIALIZE TO UNIFORM RANDOM for all layers except clamped layers
for layer in model.layer:
layer.state.assign(0)
layer.ApplyActivation()
def reshape_softmax(enter=True):
if enter:
input_layer.state.reshape((numlabels, dimensions * batchsize))
input_layer.foo.reshape((numlabels, dimensions * batchsize))
data.reshape((1, dimensions * batchsize))
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
else:
input_layer.state.reshape((numlabels * dimensions, batchsize))
input_layer.foo.reshape((numlabels * dimensions, batchsize))
data.reshape((dimensions, batchsize))
input_layer.batchsize_temp.reshape((dimensions, batchsize))
# RUN Imputation Error
for dim_idx in range(dimensions):
#-------------------------------------------
# Set state of input variables
input_layer.GetData()
offset = dim_idx * numlabels
input_layer.state.set_row_slice(offset, offset + numlabels, \
input_layer.biasslice)
for layer in model.layer:
if not layer.is_input:
layer.state.assign(0)
# Run MF steps
for mf_idx in range(mf_steps):
for hid_mf_idx in range(hidden_mf_steps):
for layer in hidden_layers:
model.ComputeUp(layer, train=False, compute_input=False, step=0,
maxsteps=0, use_samples=False, neg_phase=False)
model.ComputeUp(input_layer, train=False, compute_input=True, step=0,
maxsteps=0, use_samples=False, neg_phase=False)
input_layer.state.get_row_slice(offset, offset + numlabels , \
target=input_layer.fooslice)
input_layer.GetData()
input_layer.state.set_row_slice(offset, offset + numlabels , \
input_layer.fooslice)
# Calculate pll
reshape_softmax(enter=True)
input_layer.state.get_softmax_cross_entropy(data,\
target=input_layer.batchsize_temp, tiny=input_layer.tiny)
reshape_softmax(enter=False)
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
pll.add_sums(input_layer.barslice, axis=0)
# Calculate imputation error
if 'blosum90' in opts:
reshape_softmax(enter=True)
input_layer.state.get_softmax_blosum90(data, target=input_layer.batchsize_temp)
reshape_softmax(enter=False)
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
imputation_err.add_sums(input_layer.barslice, axis=0)
else:
reshape_softmax(enter=True)
input_layer.state.get_softmax_correct(data, target=input_layer.batchsize_temp)
reshape_softmax(enter=False)
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
imputation_err.add_sums(input_layer.barslice, axis=0, mult=-1.)
imputation_err.add(1.)
#--------------------------------------
# free device memory for newly created arrays
pll_cpu = -pll.asarray()
imperr_cpu = imputation_err.asarray()
imperr_cpu /= (dimensions+0.)
input_layer.fooslice.free_device_memory()
input_layer.biasslice.free_device_memory()
input_layer.barslice.free_device_memory()
pll.free_device_memory()
imputation_err.free_device_memory()
return pll_cpu, imperr_cpu
def multicol_mf(model, multicols, **opts):
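  """Same as impute_mf, but blanks out whole blocks of columns at once.

  multicols is an (nBlocks, nCols) integer matrix; each row lists the input
  dimensions that are hidden together and then jointly reconstructed from a
  single mean-field pass before being scored.
  """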
# Initialize stuff
batchsize = model.batchsize
input_layer = model.GetLayerByName('input_layer')
hidden_layers = []
for layer in model.layer:
if not layer.is_input:
hidden_layers.append(layer)
dimensions = input_layer.dimensions
numlabels = input_layer.numlabels
data = input_layer.data
# set up temp data structures
for layer in model.layer:
layer.foo = layer.statesize
input_layer.fooslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
input_layer.barslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
pll = cm.CUDAMatrix(np.zeros([1, batchsize]))
imputation_err = cm.CUDAMatrix(np.zeros([1, batchsize]))
input_layer.biasslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
input_layer.biasslice.apply_softmax()
# Get the multicol dimensions
nBlocks, nCols = multicols.shape
# INITIALIZE TO UNIFORM RANDOM for all layers except clamped layers
for layer in model.layer:
layer.state.assign(0)
layer.ApplyActivation()
def reshape_softmax(enter=True):
if enter:
input_layer.state.reshape((numlabels, dimensions * batchsize))
input_layer.foo.reshape((numlabels, dimensions * batchsize))
data.reshape((1, dimensions * batchsize))
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
else:
input_layer.state.reshape((numlabels * dimensions, batchsize))
input_layer.foo.reshape((numlabels * dimensions, batchsize))
data.reshape((dimensions, batchsize))
input_layer.batchsize_temp.reshape((dimensions, batchsize))
# RUN Imputation Error
for mult_idx in range(nBlocks):
#-------------------------------------------
# Set state of input variables
input_layer.GetData()
for col_idx in range(nCols):
dim_idx = multicols[mult_idx, col_idx]
offset = dim_idx * numlabels
input_layer.state.set_row_slice(offset, offset + numlabels, \
input_layer.biasslice)
for layer in model.layer:
if not layer.is_input:
layer.state.assign(0)
for layer in hidden_layers:
model.ComputeUp(layer, train=False, compute_input=False, step=0,
maxsteps=0, use_samples=False, neg_phase=False)
model.ComputeUp(input_layer, train=False, compute_input=True, step=0,
maxsteps=0, use_samples=False, neg_phase=False)
# Calculate pll
reshape_softmax(enter=True)
input_layer.state.get_softmax_cross_entropy(data,\
target=input_layer.batchsize_temp, tiny=input_layer.tiny)
reshape_softmax(enter=False)
for col_idx in range(nCols):
dim_idx = multicols[mult_idx, col_idx]
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
pll.add_sums(input_layer.barslice, axis=0)
# Calculate imputation error
if 'blosum90' in opts:
reshape_softmax(enter=True)
input_layer.state.get_softmax_blosum90(data, target=input_layer.batchsize_temp)
reshape_softmax(enter=False)
for col_idx in range(nCols):
dim_idx = multicols[mult_idx, col_idx]
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
imputation_err.add_sums(input_layer.barslice, axis=0)
else:
reshape_softmax(enter=True)
input_layer.state.get_softmax_correct(data, target=input_layer.batchsize_temp)
reshape_softmax(enter=False)
for col_idx in range(nCols):
dim_idx = multicols[mult_idx, col_idx]
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
imputation_err.add_sums(input_layer.barslice, axis=0, mult=-1.)
imputation_err.add(1.)
#--------------------------------------
# free device memory for newly created arrays
pll_cpu = -pll.asarray()
imperr_cpu = imputation_err.asarray()
imperr_cpu /= (nBlocks * nCols +0.)
input_layer.fooslice.free_device_memory()
input_layer.biasslice.free_device_memory()
input_layer.barslice.free_device_memory()
pll.free_device_memory()
imputation_err.free_device_memory()
return pll_cpu, imperr_cpu
def Usage():
  print 'usage: <model file> <number of Markov chains to run> [number of words (for Replicated Softmax models)]'
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser(description="Run AIS")
parser.add_argument("--model_file", type=str)
parser.add_argument("--train_file", type=str)
parser.add_argument("--infer-method", type=str, default='exact', \
help='mf/gibbs/exact/gaussian_exact')
parser.add_argument("--mf-steps", type=int, default=1)
parser.add_argument("--hidden-mf-steps", type=int, default=1)
parser.add_argument("--outf", type=str, help='Output File')
parser.add_argument("--valid_only", action='store_true', help="only run the validation set")
parser.add_argument("--blosum90", action='store_true', help="Calculate blosum90 scores")
parser.add_argument("--ncols", type=int, help="Number of multiple columns")
parser.add_argument("--multmode", type=str, help="Multicol mode",default='rand')
args = parser.parse_args()
if not args.outf :
raise ValueError('Output file not defined')
if not args.train_file or not args.model_file :
raise ValueError('Models and data missing')
board = tr.LockGPU()
model_file = args.model_file
train_file = args.train_file
model = dbm.DBM(model_file, train_file)
trainer_pb = util.ReadOperation(train_file)
dataset = os.path.basename(trainer_pb.data_proto_prefix)
# Fix paths
dirname = os.path.split(model.t_op.data_proto_prefix)[1]
model.t_op.data_proto_prefix = os.path.join('datasets/',\
dirname)
model.t_op.skip_last_piece = False
model.t_op.get_last_piece = True
model.t_op.randomize = False
model.LoadModelOnGPU()
model.SetUpData()
if args.valid_only:
data_types = ['valid']
else:
data_types = ['train', 'valid', 'test']
datagetters = {
'train' : model.GetTrainBatch,
'valid' : model.GetValidationBatch,
'test' : model.GetTestBatch
}
batchsizes = {
'train' : model.train_data_handler.num_batches,
'valid' : model.validation_data_handler.num_batches,
'test' : model.test_data_handler.num_batches
}
opts = {}
cm.CUDAMatrix.init_random(seed=int(time.time()))
  if len(model.layer) > 2 and args.infer_method == 'exact':
    raise ValueError('Cannot use exact inference for DBMs')
from collections import defaultdict
pll_data = defaultdict(list)
imperr_data = defaultdict(list)
for data_type in data_types:
num_batches = batchsizes[data_type]
datagetter = datagetters[data_type]
for batch_idx in range(num_batches):
print("Evalutating batch {}".format(batch_idx+1))
datagetter()
if args.infer_method == 'mf':
if args.blosum90:
pll, imperr = impute_mf(model, args.mf_steps, args.hidden_mf_steps, blosum90=True)
else:
pll, imperr = impute_mf(model, args.mf_steps, args.hidden_mf_steps)
elif args.infer_method == 'multicol':
        ncols = args.ncols
        multicol_file = 'datasets/{0}/multicol/{1}_{2}.mat'.format(dataset, args.multmode, ncols)
        multicols = sio.loadmat(multicol_file)['multicols']
        multicols = np.asarray(multicols, dtype=np.int)
        multicols = multicols - 1  # convert from matlab indexing
if args.blosum90:
pll, imperr = multicol_mf(model, multicols, blosum90=True)
else:
pll, imperr = multicol_mf(model, multicols)
elif args.infer_method == 'exact':
pll, imperr = impute_rbm_exact(model)
elif args.infer_method == 'gaussian_exact':
pll, imperr = impute_rbm_gaussian_exact(model)
else:
raise ValueError("Unknown infer method")
pll, imperr = pll.flatten(), imperr.flatten()
pll_data[data_type].append(pll)
imperr_data[data_type].append(imperr)
pll_data[data_type] = np.concatenate(pll_data[data_type])
imperr_data[data_type] = np.concatenate(imperr_data[data_type])
#-------------------------------------------------------------------
# Print and save the results
for dtype in pll_data :
pll = pll_data[dtype]
imperr = imperr_data[dtype]
print '%s : Pseudo-LogLikelihood %.5f, std %.5f' % (dtype, pll.mean(), pll.std())
print '%s : Imputation Error %.5f, std %.5f' % (dtype, imperr.mean(), imperr.std())
tr.FreeGPU(board)
import pickle
with open(args.outf,'wb') as fout:
pkldata = { 'pll' : pll_data, 'imperr' : imperr_data }
pickle.dump(pkldata, fout)
| bsd-3-clause | 1,073,403,383,934,571,500 | 37.904 | 110 | 0.618096 | false |
dls-controls/pymalcolm | tests/test_modules/test_ca/test_caactionpart.py | 1 | 3498 | import unittest
from mock import Mock, patch
from malcolm.core import Process
from malcolm.modules.builtin.controllers import StatefulController
from malcolm.modules.ca.parts import CAActionPart
class caint(int):
ok = True
class ca_nothing(object):
ok = False
@patch("malcolm.modules.ca.util.catools")
class TestCAActionPart(unittest.TestCase):
def create_part(self, params=None):
if params is None:
params = dict(
name="mname",
description="desc",
pv="pv",
)
p = CAActionPart(**params)
p.setup(Mock())
return p
def test_init(self, catools):
p = self.create_part()
assert p.pv == "pv"
assert p.value == 1
assert p.wait is True
assert p.description == "desc"
p.registrar.add_method_model.assert_called_once_with(p.caput, "mname", "desc")
def test_reset(self, catools):
catools.ca_nothing = ca_nothing
p = self.create_part()
catools.caget.reset_mock()
catools.caget.return_value = [caint(4)]
p.reconnect()
catools.caget.assert_called_with(["pv"], throw=True)
def test_caput(self, catools):
p = self.create_part()
catools.caput.reset_mock()
p.caput()
catools.caput.assert_called_once_with("pv", 1, wait=True, timeout=None)
def make_camonitor_return(self, catools, values):
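        # Stub out catools.camonitor so that subscribing immediately replays
        # the given value(s) to the callback; this exercises the status-PV
        # wait logic in CAActionPart without a real channel access connection.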
if not isinstance(values, (list, tuple)):
values = [values]
def side_effect(pv, cb, **kwargs):
for v in values:
cb(v)
return Mock()
catools.camonitor.side_effect = side_effect
def test_caput_status_pv_ok(self, catools):
p = self.create_part(
dict(
name="mname",
description="desc",
pv="pv",
status_pv="spv",
good_status="All Good",
)
)
self.make_camonitor_return(catools, ["Still going", "All Good"])
p.caput()
def test_caput_status_pv_no_good(self, catools):
p = self.create_part(
dict(
name="mname",
description="desc",
pv="pv",
status_pv="spv",
good_status="All Good",
)
)
self.make_camonitor_return(catools, "No Good")
with self.assertRaises(AssertionError) as cm:
p.caput()
assert str(cm.exception) == "Status No Good: while performing 'caput pv 1'"
def test_caput_status_pv_message(self, catools):
p = self.create_part(
dict(
name="mname",
description="desc",
pv="pv",
status_pv="spv",
good_status="All Good",
message_pv="mpv",
)
)
catools.caget.return_value = [caint(4)]
c = StatefulController("mri")
c.add_part(p)
proc = Process("proc")
proc.add_controller(c)
proc.start()
self.addCleanup(proc.stop)
b = proc.block_view("mri")
self.make_camonitor_return(catools, "No Good")
catools.caget.return_value = "Bad things happened"
with self.assertRaises(AssertionError) as cm:
b.mname()
assert (
str(cm.exception) == "Status No Good: Bad things happened: "
"while performing 'caput pv 1'"
)
| apache-2.0 | 7,719,074,021,969,940,000 | 28.15 | 86 | 0.534305 | false |
rcbops/opencenter | opencenter/db/migrate_repo/versions/001_initial_data.py | 1 | 6716 | #!/usr/bin/env python
# OpenCenter(TM) is Copyright 2013 by Rackspace US, Inc.
##############################################################################
#
# OpenCenter is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. This
# version of OpenCenter includes Rackspace trademarks and logos, and in
# accordance with Section 6 of the License, the provision of commercial
# support services in conjunction with a version of OpenCenter which includes
# Rackspace trademarks and logos is prohibited. OpenCenter source code and
# details are available at: # https://github.com/rcbops/opencenter or upon
# written request.
#
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 and a copy, including this
# notice, is available in the LICENSE file accompanying this software.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the # specific language governing permissions and limitations
# under the License.
#
##############################################################################
import json
import os
from sqlalchemy import *
from migrate import *
from opencenter.db.api import api_from_models
adventures = [
{'name': 'Run Chef',
'dsl': 'run_chef.json',
'criteria': 'run_chef.criteria'},
{'name': 'Install Chef Server',
'dsl': 'install_chef_server.json',
'criteria': 'install_chef_server.criteria'},
{'name': 'Create Nova Cluster',
'dsl': 'create_nova_cluster.json',
'criteria': 'create_nova_cluster.criteria'},
{'name': 'Enable HA Infrastructure',
'dsl': 'enable_ha_infrastructure.json',
'criteria': 'enable_ha_infrastructure.criteria'},
{'name': 'Download Chef Cookbooks',
'dsl': 'download_cookbooks.json',
'criteria': 'download_cookbooks.criteria'},
{'name': 'Subscribe Cookbook Channel',
'dsl': 'subscribe_cookbook_channel.json',
'criteria': 'subscribe_cookbook_channel.criteria'},
{'name': 'Sleep',
'dsl': 'sleep.json',
'criteria': 'sleep.criteria'},
{'name': 'Update Server',
'dsl': 'update_server.json',
'criteria': 'update_server.criteria'},
{'name': 'Update Agent',
'dsl': 'update_agent.json',
'criteria': 'update_agent.criteria'},
{'name': 'Create Availability Zone',
'dsl': 'create_az.json',
'criteria': 'create_az.criteria'},
{'name': 'Disable Scheduling on this Host',
'dsl': 'openstack_disable_host.json',
'criteria': 'openstack_disable_host.criteria'},
{'name': 'Enable Scheduling on this Host',
'dsl': 'openstack_enable_host.json',
'criteria': 'openstack_enable_host.criteria'},
{'name': 'Evacuate Host',
'dsl': 'openstack_evacuate_host.json',
'criteria': 'openstack_evacuate_host.criteria'},
{'name': 'Upload Initial Glance Images',
'dsl': 'openstack_upload_images.json',
'criteria': 'openstack_upload_images.criteria'},
{'name': 'Install Chef Client',
'dsl': 'install_chef.json',
'criteria': 'install_chef.criteria'},
{'name': 'Uninstall Chef Client',
'dsl': 'uninstall_chef.json',
'criteria': 'uninstall_chef.criteria'},
{'name': 'Uninstall Chef Server',
'dsl': 'uninstall_chef_server.json',
'criteria': 'uninstall_chef_server.criteria'}]
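# Each entry pairs an adventure name with the DSL (JSON) and criteria files
# shipped alongside this migration; upgrade() below reads both files and
# creates the adventure through the OpenCenter API.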
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
api = api_from_models()
for adventure in adventures:
new_adventure = {'name': adventure['name']}
json_path = os.path.join(
os.path.dirname(__file__), adventure['dsl'])
criteria_path = os.path.join(
os.path.dirname(__file__), adventure['criteria'])
new_adventure['dsl'] = json.loads(open(json_path).read())
new_adventure['criteria'] = open(criteria_path).read()
api.adventure_create(new_adventure)
canned_filters = [{'name': 'unprovisioned nodes',
'filter_type': 'node',
'expr': 'backend=\'unprovisioned\''},
{'name': 'chef client nodes',
'filter_type': 'node',
'expr': 'backend=\'chef-client\''},
{'name': 'chef-server',
'filter_type': 'interface',
'expr': 'facts.chef_server_uri != None and '
'facts.chef_server_pem != None'}]
for new_filter in canned_filters:
api._model_create('filters', new_filter)
workspace = api.node_create({'name': 'workspace'})
api._model_create('attrs', {'node_id': workspace['id'],
'key': 'json_schema_version',
'value': 1})
unprov = api.node_create({'name': 'unprovisioned'})
api._model_create('facts', {'node_id': unprov['id'],
'key': 'parent_id',
'value': workspace['id']})
support = api.node_create({'name': 'support'})
api._model_create('facts', {'node_id': support['id'],
'key': 'parent_id',
'value': workspace['id']})
# Add default fact to the default nodes
node_list = [(workspace, "Workspace"),
(unprov, "Available Nodes"),
(support, "Service Nodes")]
for node, display in node_list:
api.fact_create({'node_id': node['id'],
'key': 'backends',
'value': ["container", "node"]})
api.attr_create({'node_id': node['id'],
'key': 'display_name',
'value': display})
api.attr_create({'node_id': node['id'],
'key': 'locked',
'value': True})
def downgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
api = api_from_models()
adventure_names = [x['name'] for x in adventures]
for name in adventure_names:
adventure_list = api._model_query('adventures', 'name="%s"' % name)
for adv in adventure_list:
api._model_delete_by_id('adventures', adv['id'])
node_list = ['"support"', '"unprovisioned"', '"workspace"']
for node in node_list:
tmp = api.nodes_query('name = %s' % node)
fact_list = api.facts_query('node_id = %s' % tmp[0]['id'])
for fact in fact_list:
api.fact_delete_by_id(fact['id'])
api.node_delete_by_id(tmp[0]['id'])
| apache-2.0 | 595,253,243,819,142,300 | 39.457831 | 79 | 0.570876 | false |
smn/blinky | blinky/slack/models.py | 1 | 1402 | from __future__ import unicode_literals
import requests
from django.db import models
class SlackWebhook(models.Model):
url = models.URLField()
username = models.CharField(max_length=255, null=True, blank=True)
icon_emoji = models.CharField(max_length=255, null=True, blank=True)
channel = models.CharField(max_length=255, null=True, blank=True)
apply_global = models.BooleanField(default=True)
limit_worker_types = models.ManyToManyField('core.WorkerType', blank=True)
is_active = models.BooleanField(default=True)
@classmethod
def for_worker_type(cls, worker_type):
return cls.objects.filter(
models.Q(apply_global=True) |
models.Q(limit_worker_types=worker_type)
).filter(is_active=True).distinct()
def slack_payload(self, text):
payload = {
'text': text
}
if self.username:
payload['username'] = self.username
if self.icon_emoji:
payload['icon_emoji'] = self.icon_emoji
if self.channel:
payload['channel'] = self.channel
return payload
def fire(self, text):
response = requests.post(self.url, headers={
'Content-Type': 'application/json',
}, json=self.slack_payload(text))
response.raise_for_status()
return response
def __unicode__(self):
return self.url
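# Illustrative usage (``worker_type`` stands in for a core.WorkerType instance
# and is not defined in this module):
#
#     for hook in SlackWebhook.for_worker_type(worker_type):
#         hook.fire('worker %s missed its heartbeat' % worker_type)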
| bsd-3-clause | 4,312,822,045,934,085,600 | 31.604651 | 78 | 0.631954 | false |
yavalvas/yav_com | build/matplotlib/lib/matplotlib/mathtext.py | 1 | 111273 | r"""
:mod:`~matplotlib.mathtext` is a module for parsing a subset of the
TeX math syntax and drawing them to a matplotlib backend.
For a tutorial of its usage see :ref:`mathtext-tutorial`. This
document is primarily concerned with implementation details.
The module uses pyparsing_ to parse the TeX expression.
.. _pyparsing: http://pyparsing.wikispaces.com/
The Bakoma distribution of the TeX Computer Modern fonts, and STIX
fonts are supported. There is experimental support for using
arbitrary fonts, but results may vary without proper tweaking and
metrics for those fonts.
If you find TeX expressions that don't parse or render properly,
please email [email protected], but please check KNOWN ISSUES below first.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os, sys
from six import unichr
from math import ceil
try:
set
except NameError:
from sets import Set as set
import unicodedata
from warnings import warn
from numpy import inf, isinf
import numpy as np
import pyparsing
from pyparsing import Combine, Group, Optional, Forward, \
Literal, OneOrMore, ZeroOrMore, ParseException, Empty, \
ParseResults, Suppress, oneOf, StringEnd, ParseFatalException, \
FollowedBy, Regex, ParserElement, QuotedString, ParseBaseException
# Enable packrat parsing
if (six.PY3 and
[int(x) for x in pyparsing.__version__.split('.')] < [2, 0, 0]):
warn("Due to a bug in pyparsing <= 2.0.0 on Python 3.x, packrat parsing "
"has been disabled. Mathtext rendering will be much slower as a "
"result. Install pyparsing 2.0.0 or later to improve performance.")
else:
ParserElement.enablePackrat()
from matplotlib.afm import AFM
from matplotlib.cbook import Bunch, get_realpath_and_stat, \
is_string_like, maxdict
from matplotlib.ft2font import FT2Font, FT2Image, KERNING_DEFAULT, LOAD_FORCE_AUTOHINT, LOAD_NO_HINTING
from matplotlib.font_manager import findfont, FontProperties
from matplotlib._mathtext_data import latex_to_bakoma, \
latex_to_standard, tex2uni, latex_to_cmex, stix_virtual_fonts
from matplotlib import get_data_path, rcParams
import matplotlib.colors as mcolors
import matplotlib._png as _png
####################
##############################################################################
# FONTS
def get_unicode_index(symbol):
"""get_unicode_index(symbol) -> integer
Return the integer index (from the Unicode table) of symbol. *symbol*
can be a single unicode character, a TeX command (i.e. r'\pi'), or a
Type1 symbol name (i.e. 'phi').
"""
# From UTF #25: U+2212 minus sign is the preferred
# representation of the unary and binary minus sign rather than
# the ASCII-derived U+002D hyphen-minus, because minus sign is
# unambiguous and because it is rendered with a more desirable
# length, usually longer than a hyphen.
if symbol == '-':
return 0x2212
try:# This will succeed if symbol is a single unicode char
return ord(symbol)
except TypeError:
pass
try:# Is symbol a TeX symbol (i.e. \alpha)
return tex2uni[symbol.strip("\\")]
except KeyError:
message = """'%(symbol)s' is not a valid Unicode character or
TeX/Type1 symbol"""%locals()
raise ValueError(message)
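# Illustrative examples:
#   get_unicode_index('-')      -> 0x2212  (minus sign, per the note above)
#   get_unicode_index(r'\pi')   -> 0x3c0   (looked up in the tex2uni table)
#   get_unicode_index('x')      -> 0x78    (ord() of the single character)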
def unichr_safe(index):
"""Return the Unicode character corresponding to the index,
or the replacement character if this is a narrow build of Python
and the requested character is outside the BMP."""
try:
return unichr(index)
except ValueError:
return unichr(0xFFFD)
class MathtextBackend(object):
"""
The base class for the mathtext backend-specific code. The
purpose of :class:`MathtextBackend` subclasses is to interface
between mathtext and a specific matplotlib graphics backend.
Subclasses need to override the following:
      - :meth:`render_glyph`
      - :meth:`render_rect_filled`
      - :meth:`get_results`
And optionally, if you need to use a Freetype hinting style:
- :meth:`get_hinting_type`
"""
def __init__(self):
self.width = 0
self.height = 0
self.depth = 0
def set_canvas_size(self, w, h, d):
'Dimension the drawing canvas'
self.width = w
self.height = h
self.depth = d
def render_glyph(self, ox, oy, info):
"""
Draw a glyph described by *info* to the reference point (*ox*,
*oy*).
"""
raise NotImplementedError()
    def render_rect_filled(self, x1, y1, x2, y2):
"""
Draw a filled black rectangle from (*x1*, *y1*) to (*x2*, *y2*).
"""
raise NotImplementedError()
def get_results(self, box):
"""
Return a backend-specific tuple to return to the backend after
all processing is done.
"""
raise NotImplementedError()
def get_hinting_type(self):
"""
Get the Freetype hinting type to use with this particular
backend.
"""
return LOAD_NO_HINTING
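# A minimal sketch of a custom backend (illustrative only, not one of the
# shipped backends): a subclass simply records the drawing commands it is
# handed and returns them from get_results().
#
#     class MathtextBackendLog(MathtextBackend):
#         def __init__(self):
#             MathtextBackend.__init__(self)
#             self.ops = []
#
#         def render_glyph(self, ox, oy, info):
#             self.ops.append(('glyph', ox, oy, info.symbol_name))
#
#         def render_rect_filled(self, x1, y1, x2, y2):
#             self.ops.append(('rect', x1, y1, x2, y2))
#
#         def get_results(self, box, used_characters):
#             ship(0, 0, box)  # walks the box tree and calls the render_* hooks
#             return self.ops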
class MathtextBackendAgg(MathtextBackend):
"""
Render glyphs and rectangles to an FTImage buffer, which is later
transferred to the Agg image by the Agg backend.
"""
def __init__(self):
self.ox = 0
self.oy = 0
self.image = None
self.mode = 'bbox'
self.bbox = [0, 0, 0, 0]
MathtextBackend.__init__(self)
def _update_bbox(self, x1, y1, x2, y2):
self.bbox = [min(self.bbox[0], x1),
min(self.bbox[1], y1),
max(self.bbox[2], x2),
max(self.bbox[3], y2)]
def set_canvas_size(self, w, h, d):
MathtextBackend.set_canvas_size(self, w, h, d)
if self.mode != 'bbox':
self.image = FT2Image(ceil(w), ceil(h + d))
def render_glyph(self, ox, oy, info):
if self.mode == 'bbox':
self._update_bbox(ox + info.metrics.xmin,
oy - info.metrics.ymax,
ox + info.metrics.xmax,
oy - info.metrics.ymin)
else:
info.font.draw_glyph_to_bitmap(
self.image, ox, oy - info.metrics.iceberg, info.glyph,
antialiased=rcParams['text.antialiased'])
def render_rect_filled(self, x1, y1, x2, y2):
if self.mode == 'bbox':
self._update_bbox(x1, y1, x2, y2)
else:
height = max(int(y2 - y1) - 1, 0)
if height == 0:
center = (y2 + y1) / 2.0
y = int(center - (height + 1) / 2.0)
else:
y = int(y1)
self.image.draw_rect_filled(int(x1), y, ceil(x2), y + height)
def get_results(self, box, used_characters):
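        # Two passes over the box tree: the first ('bbox') pass only
        # accumulates the ink bounding box, the canvas is then sized to that
        # box, and the second ('render') pass draws the glyphs into the
        # FT2Image.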
self.mode = 'bbox'
orig_height = box.height
orig_depth = box.depth
ship(0, 0, box)
bbox = self.bbox
bbox = [bbox[0] - 1, bbox[1] - 1, bbox[2] + 1, bbox[3] + 1]
self.mode = 'render'
self.set_canvas_size(
bbox[2] - bbox[0],
(bbox[3] - bbox[1]) - orig_depth,
(bbox[3] - bbox[1]) - orig_height)
ship(-bbox[0], -bbox[1], box)
result = (self.ox,
self.oy,
self.width,
self.height + self.depth,
self.depth,
self.image,
used_characters)
self.image = None
return result
def get_hinting_type(self):
from matplotlib.backends import backend_agg
return backend_agg.get_hinting_flag()
class MathtextBackendBitmap(MathtextBackendAgg):
def get_results(self, box, used_characters):
ox, oy, width, height, depth, image, characters = \
MathtextBackendAgg.get_results(self, box, used_characters)
return image, depth
class MathtextBackendPs(MathtextBackend):
"""
Store information to write a mathtext rendering to the PostScript
backend.
"""
def __init__(self):
self.pswriter = six.moves.cStringIO()
self.lastfont = None
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
postscript_name = info.postscript_name
fontsize = info.fontsize
symbol_name = info.symbol_name
if (postscript_name, fontsize) != self.lastfont:
ps = """/%(postscript_name)s findfont
%(fontsize)s scalefont
setfont
""" % locals()
self.lastfont = postscript_name, fontsize
self.pswriter.write(ps)
ps = """%(ox)f %(oy)f moveto
/%(symbol_name)s glyphshow\n
""" % locals()
self.pswriter.write(ps)
def render_rect_filled(self, x1, y1, x2, y2):
ps = "%f %f %f %f rectfill\n" % (x1, self.height - y2, x2 - x1, y2 - y1)
self.pswriter.write(ps)
def get_results(self, box, used_characters):
ship(0, 0, box)
return (self.width,
self.height + self.depth,
self.depth,
self.pswriter,
used_characters)
class MathtextBackendPdf(MathtextBackend):
"""
Store information to write a mathtext rendering to the PDF
backend.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
filename = info.font.fname
oy = self.height - oy + info.offset
self.glyphs.append(
(ox, oy, filename, info.fontsize,
info.num, info.symbol_name))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append((x1, self.height - y2, x2 - x1, y2 - y1))
def get_results(self, box, used_characters):
ship(0, 0, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects,
used_characters)
class MathtextBackendSvg(MathtextBackend):
"""
Store information to write a mathtext rendering to the SVG
backend.
"""
def __init__(self):
self.svg_glyphs = []
self.svg_rects = []
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
self.svg_glyphs.append(
(info.font, info.fontsize, info.num, ox, oy, info.metrics))
def render_rect_filled(self, x1, y1, x2, y2):
self.svg_rects.append(
(x1, self.height - y1 + 1, x2 - x1, y2 - y1))
def get_results(self, box, used_characters):
ship(0, 0, box)
svg_elements = Bunch(svg_glyphs = self.svg_glyphs,
svg_rects = self.svg_rects)
return (self.width,
self.height + self.depth,
self.depth,
svg_elements,
used_characters)
class MathtextBackendPath(MathtextBackend):
"""
Store information to write a mathtext rendering to the text path
machinery.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
thetext = info.num
self.glyphs.append(
(info.font, info.fontsize, thetext, ox, oy))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append(
(x1, self.height-y2 , x2 - x1, y2 - y1))
def get_results(self, box, used_characters):
ship(0, 0, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects)
class MathtextBackendCairo(MathtextBackend):
"""
Store information to write a mathtext rendering to the Cairo
backend.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
oy = oy - info.offset - self.height
thetext = unichr_safe(info.num)
self.glyphs.append(
(info.font, info.fontsize, thetext, ox, oy))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append(
(x1, y1 - self.height, x2 - x1, y2 - y1))
def get_results(self, box, used_characters):
ship(0, 0, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects)
class Fonts(object):
"""
An abstract base class for a system of fonts to use for mathtext.
The class must be able to take symbol keys and font file names and
return the character metrics. It also delegates to a backend class
to do the actual drawing.
"""
def __init__(self, default_font_prop, mathtext_backend):
"""
*default_font_prop*: A
:class:`~matplotlib.font_manager.FontProperties` object to use
for the default non-math font, or the base font for Unicode
(generic) font rendering.
*mathtext_backend*: A subclass of :class:`MathTextBackend`
used to delegate the actual rendering.
"""
self.default_font_prop = default_font_prop
self.mathtext_backend = mathtext_backend
self.used_characters = {}
def destroy(self):
"""
Fix any cyclical references before the object is about
to be destroyed.
"""
self.used_characters = None
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
"""
Get the kerning distance for font between *sym1* and *sym2*.
*fontX*: one of the TeX font names::
tt, it, rm, cal, sf, bf or default/regular (non-math)
*fontclassX*: TODO
*symX*: a symbol in raw TeX form. e.g., '1', 'x' or '\sigma'
*fontsizeX*: the fontsize in points
*dpi*: the current dots-per-inch
"""
return 0.
def get_metrics(self, font, font_class, sym, fontsize, dpi):
"""
*font*: one of the TeX font names::
tt, it, rm, cal, sf, bf or default/regular (non-math)
*font_class*: TODO
*sym*: a symbol in raw TeX form. e.g., '1', 'x' or '\sigma'
*fontsize*: font size in points
*dpi*: current dots-per-inch
Returns an object with the following attributes:
- *advance*: The advance distance (in points) of the glyph.
- *height*: The height of the glyph in points.
- *width*: The width of the glyph in points.
- *xmin*, *xmax*, *ymin*, *ymax* - the ink rectangle of the glyph
- *iceberg* - the distance from the baseline to the top of
the glyph. This corresponds to TeX's definition of
"height".
"""
info = self._get_info(font, font_class, sym, fontsize, dpi)
return info.metrics
def set_canvas_size(self, w, h, d):
"""
Set the size of the buffer used to render the math expression.
Only really necessary for the bitmap backends.
"""
self.width, self.height, self.depth = ceil(w), ceil(h), ceil(d)
self.mathtext_backend.set_canvas_size(self.width, self.height, self.depth)
def render_glyph(self, ox, oy, facename, font_class, sym, fontsize, dpi):
"""
Draw a glyph at
- *ox*, *oy*: position
- *facename*: One of the TeX face names
- *font_class*:
- *sym*: TeX symbol name or single character
- *fontsize*: fontsize in points
- *dpi*: The dpi to draw at.
"""
info = self._get_info(facename, font_class, sym, fontsize, dpi)
realpath, stat_key = get_realpath_and_stat(info.font.fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].add(info.num)
self.mathtext_backend.render_glyph(ox, oy, info)
def render_rect_filled(self, x1, y1, x2, y2):
"""
Draw a filled rectangle from (*x1*, *y1*) to (*x2*, *y2*).
"""
self.mathtext_backend.render_rect_filled(x1, y1, x2, y2)
def get_xheight(self, font, fontsize, dpi):
"""
Get the xheight for the given *font* and *fontsize*.
"""
raise NotImplementedError()
def get_underline_thickness(self, font, fontsize, dpi):
"""
Get the line thickness that matches the given font. Used as a
base unit for drawing lines such as in a fraction or radical.
"""
raise NotImplementedError()
def get_used_characters(self):
"""
Get the set of characters that were used in the math
expression. Used by backends that need to subset fonts so
they know which glyphs to include.
"""
return self.used_characters
def get_results(self, box):
"""
Get the data needed by the backend to render the math
expression. The return value is backend-specific.
"""
result = self.mathtext_backend.get_results(box, self.get_used_characters())
self.destroy()
return result
def get_sized_alternatives_for_symbol(self, fontname, sym):
"""
Override if your font provides multiple sizes of the same
symbol. Should return a list of symbols matching *sym* in
various sizes. The expression renderer will select the most
appropriate size for a given situation from this list.
"""
return [(fontname, sym)]
class TruetypeFonts(Fonts):
"""
A generic base class for all font setups that use Truetype fonts
(through FT2Font).
"""
class CachedFont:
def __init__(self, font):
self.font = font
self.charmap = font.get_charmap()
self.glyphmap = dict(
[(glyphind, ccode) for ccode, glyphind in six.iteritems(self.charmap)])
def __repr__(self):
return repr(self.font)
def __init__(self, default_font_prop, mathtext_backend):
Fonts.__init__(self, default_font_prop, mathtext_backend)
self.glyphd = {}
self._fonts = {}
filename = findfont(default_font_prop)
default_font = self.CachedFont(FT2Font(filename))
self._fonts['default'] = default_font
self._fonts['regular'] = default_font
def destroy(self):
self.glyphd = None
Fonts.destroy(self)
def _get_font(self, font):
if font in self.fontmap:
basename = self.fontmap[font]
else:
basename = font
cached_font = self._fonts.get(basename)
if cached_font is None and os.path.exists(basename):
font = FT2Font(basename)
cached_font = self.CachedFont(font)
self._fonts[basename] = cached_font
self._fonts[font.postscript_name] = cached_font
self._fonts[font.postscript_name.lower()] = cached_font
return cached_font
def _get_offset(self, cached_font, glyph, fontsize, dpi):
if cached_font.font.postscript_name == 'Cmex10':
return ((glyph.height/64.0/2.0) + (fontsize/3.0 * dpi/72.0))
return 0.
def _get_info(self, fontname, font_class, sym, fontsize, dpi):
key = fontname, font_class, sym, fontsize, dpi
bunch = self.glyphd.get(key)
if bunch is not None:
return bunch
cached_font, num, symbol_name, fontsize, slanted = \
self._get_glyph(fontname, font_class, sym, fontsize)
font = cached_font.font
font.set_size(fontsize, dpi)
glyph = font.load_char(
num,
flags=self.mathtext_backend.get_hinting_type())
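        # FreeType reports the bounding box and glyph metrics in 26.6 fixed
        # point (64ths of a pixel) and linearHoriAdvance in 16.16 fixed point,
        # hence the divisions by 64.0 and 65536.0 below.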
xmin, ymin, xmax, ymax = [val/64.0 for val in glyph.bbox]
offset = self._get_offset(cached_font, glyph, fontsize, dpi)
metrics = Bunch(
advance = glyph.linearHoriAdvance/65536.0,
height = glyph.height/64.0,
width = glyph.width/64.0,
xmin = xmin,
xmax = xmax,
ymin = ymin+offset,
ymax = ymax+offset,
# iceberg is the equivalent of TeX's "height"
iceberg = glyph.horiBearingY/64.0 + offset,
slanted = slanted
)
result = self.glyphd[key] = Bunch(
font = font,
fontsize = fontsize,
postscript_name = font.postscript_name,
metrics = metrics,
symbol_name = symbol_name,
num = num,
glyph = glyph,
offset = offset
)
return result
def get_xheight(self, font, fontsize, dpi):
cached_font = self._get_font(font)
cached_font.font.set_size(fontsize, dpi)
pclt = cached_font.font.get_sfnt_table('pclt')
if pclt is None:
# Some fonts don't store the xHeight, so we do a poor man's xHeight
metrics = self.get_metrics(font, rcParams['mathtext.default'], 'x', fontsize, dpi)
return metrics.iceberg
xHeight = (pclt['xHeight'] / 64.0) * (fontsize / 12.0) * (dpi / 100.0)
return xHeight
def get_underline_thickness(self, font, fontsize, dpi):
# This function used to grab underline thickness from the font
# metrics, but that information is just too un-reliable, so it
# is now hardcoded.
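        # The formula below is 1/16 of the font size converted to pixels, e.g.
        # a 12 pt font at 72 dpi gets a 0.75 pixel (0.75 pt) rule.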
return ((0.75 / 12.0) * fontsize * dpi) / 72.0
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
if font1 == font2 and fontsize1 == fontsize2:
info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
font = info1.font
return font.get_kerning(info1.num, info2.num, KERNING_DEFAULT) / 64.0
return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi)
class BakomaFonts(TruetypeFonts):
"""
Use the Bakoma TrueType fonts for rendering.
Symbols are strewn about a number of font files, each of which has
its own proprietary 8-bit encoding.
"""
_fontmap = { 'cal' : 'cmsy10',
'rm' : 'cmr10',
'tt' : 'cmtt10',
'it' : 'cmmi10',
'bf' : 'cmb10',
'sf' : 'cmss10',
'ex' : 'cmex10'
}
def __init__(self, *args, **kwargs):
self._stix_fallback = StixFonts(*args, **kwargs)
TruetypeFonts.__init__(self, *args, **kwargs)
self.fontmap = {}
for key, val in six.iteritems(self._fontmap):
fullpath = findfont(val)
self.fontmap[key] = fullpath
self.fontmap[val] = fullpath
_slanted_symbols = set(r"\int \oint".split())
def _get_glyph(self, fontname, font_class, sym, fontsize):
symbol_name = None
if fontname in self.fontmap and sym in latex_to_bakoma:
basename, num = latex_to_bakoma[sym]
slanted = (basename == "cmmi10") or sym in self._slanted_symbols
cached_font = self._get_font(basename)
if cached_font is not None:
symbol_name = cached_font.font.get_glyph_name(num)
num = cached_font.glyphmap[num]
elif len(sym) == 1:
slanted = (fontname == "it")
cached_font = self._get_font(fontname)
if cached_font is not None:
num = ord(sym)
gid = cached_font.charmap.get(num)
if gid is not None:
symbol_name = cached_font.font.get_glyph_name(
cached_font.charmap[num])
if symbol_name is None:
return self._stix_fallback._get_glyph(
fontname, font_class, sym, fontsize)
return cached_font, num, symbol_name, fontsize, slanted
# The Bakoma fonts contain many pre-sized alternatives for the
# delimiters. The AutoSizedChar class will use these alternatives
# and select the best (closest sized) glyph.
_size_alternatives = {
'(' : [('rm', '('), ('ex', '\xa1'), ('ex', '\xb3'),
('ex', '\xb5'), ('ex', '\xc3')],
')' : [('rm', ')'), ('ex', '\xa2'), ('ex', '\xb4'),
('ex', '\xb6'), ('ex', '\x21')],
'{' : [('cal', '{'), ('ex', '\xa9'), ('ex', '\x6e'),
('ex', '\xbd'), ('ex', '\x28')],
'}' : [('cal', '}'), ('ex', '\xaa'), ('ex', '\x6f'),
('ex', '\xbe'), ('ex', '\x29')],
# The fourth size of '[' is mysteriously missing from the BaKoMa
        # font, so I've omitted it for both '[' and ']'
'[' : [('rm', '['), ('ex', '\xa3'), ('ex', '\x68'),
('ex', '\x22')],
']' : [('rm', ']'), ('ex', '\xa4'), ('ex', '\x69'),
('ex', '\x23')],
r'\lfloor' : [('ex', '\xa5'), ('ex', '\x6a'),
('ex', '\xb9'), ('ex', '\x24')],
r'\rfloor' : [('ex', '\xa6'), ('ex', '\x6b'),
('ex', '\xba'), ('ex', '\x25')],
r'\lceil' : [('ex', '\xa7'), ('ex', '\x6c'),
('ex', '\xbb'), ('ex', '\x26')],
r'\rceil' : [('ex', '\xa8'), ('ex', '\x6d'),
('ex', '\xbc'), ('ex', '\x27')],
r'\langle' : [('ex', '\xad'), ('ex', '\x44'),
('ex', '\xbf'), ('ex', '\x2a')],
r'\rangle' : [('ex', '\xae'), ('ex', '\x45'),
('ex', '\xc0'), ('ex', '\x2b')],
r'\__sqrt__' : [('ex', '\x70'), ('ex', '\x71'),
('ex', '\x72'), ('ex', '\x73')],
r'\backslash': [('ex', '\xb2'), ('ex', '\x2f'),
('ex', '\xc2'), ('ex', '\x2d')],
r'/' : [('rm', '/'), ('ex', '\xb1'), ('ex', '\x2e'),
('ex', '\xcb'), ('ex', '\x2c')],
r'\widehat' : [('rm', '\x5e'), ('ex', '\x62'), ('ex', '\x63'),
('ex', '\x64')],
r'\widetilde': [('rm', '\x7e'), ('ex', '\x65'), ('ex', '\x66'),
('ex', '\x67')],
r'<' : [('cal', 'h'), ('ex', 'D')],
r'>' : [('cal', 'i'), ('ex', 'E')]
}
for alias, target in [('\leftparen', '('),
('\rightparent', ')'),
('\leftbrace', '{'),
('\rightbrace', '}'),
('\leftbracket', '['),
('\rightbracket', ']'),
(r'\{', '{'),
(r'\}', '}'),
(r'\[', '['),
(r'\]', ']')]:
_size_alternatives[alias] = _size_alternatives[target]
def get_sized_alternatives_for_symbol(self, fontname, sym):
return self._size_alternatives.get(sym, [(fontname, sym)])
class UnicodeFonts(TruetypeFonts):
"""
An abstract base class for handling Unicode fonts.
While some reasonably complete Unicode fonts (such as DejaVu) may
work in some situations, the only Unicode font I'm aware of with a
complete set of math symbols is STIX.
This class will "fallback" on the Bakoma fonts when a required
symbol can not be found in the font.
"""
use_cmex = True
def __init__(self, *args, **kwargs):
# This must come first so the backend's owner is set correctly
if rcParams['mathtext.fallback_to_cm']:
self.cm_fallback = BakomaFonts(*args, **kwargs)
else:
self.cm_fallback = None
TruetypeFonts.__init__(self, *args, **kwargs)
self.fontmap = {}
for texfont in "cal rm tt it bf sf".split():
prop = rcParams['mathtext.' + texfont]
font = findfont(prop)
self.fontmap[texfont] = font
prop = FontProperties('cmex10')
font = findfont(prop)
self.fontmap['ex'] = font
_slanted_symbols = set(r"\int \oint".split())
def _map_virtual_font(self, fontname, font_class, uniindex):
return fontname, uniindex
def _get_glyph(self, fontname, font_class, sym, fontsize):
found_symbol = False
if self.use_cmex:
uniindex = latex_to_cmex.get(sym)
if uniindex is not None:
fontname = 'ex'
found_symbol = True
if not found_symbol:
try:
uniindex = get_unicode_index(sym)
found_symbol = True
except ValueError:
uniindex = ord('?')
warn("No TeX to unicode mapping for '%s'" %
sym.encode('ascii', 'backslashreplace'),
MathTextWarning)
fontname, uniindex = self._map_virtual_font(
fontname, font_class, uniindex)
new_fontname = fontname
# Only characters in the "Letter" class should be italicized in 'it'
# mode. Greek capital letters should be Roman.
if found_symbol:
if fontname == 'it':
if uniindex < 0x10000:
unistring = unichr(uniindex)
if (not unicodedata.category(unistring)[0] == "L"
or unicodedata.name(unistring).startswith("GREEK CAPITAL")):
new_fontname = 'rm'
slanted = (new_fontname == 'it') or sym in self._slanted_symbols
found_symbol = False
cached_font = self._get_font(new_fontname)
if cached_font is not None:
try:
glyphindex = cached_font.charmap[uniindex]
found_symbol = True
except KeyError:
pass
if not found_symbol:
if self.cm_fallback:
warn("Substituting with a symbol from Computer Modern.",
MathTextWarning)
return self.cm_fallback._get_glyph(
fontname, 'it', sym, fontsize)
else:
if fontname in ('it', 'regular') and isinstance(self, StixFonts):
return self._get_glyph('rm', font_class, sym, fontsize)
warn("Font '%s' does not have a glyph for '%s' [U%x]" %
(new_fontname, sym.encode('ascii', 'backslashreplace'), uniindex),
MathTextWarning)
warn("Substituting with a dummy symbol.", MathTextWarning)
fontname = 'rm'
new_fontname = fontname
cached_font = self._get_font(fontname)
uniindex = 0xA4 # currency character, for lack of anything better
glyphindex = cached_font.charmap[uniindex]
slanted = False
symbol_name = cached_font.font.get_glyph_name(glyphindex)
return cached_font, uniindex, symbol_name, fontsize, slanted
def get_sized_alternatives_for_symbol(self, fontname, sym):
if self.cm_fallback:
return self.cm_fallback.get_sized_alternatives_for_symbol(
fontname, sym)
return [(fontname, sym)]
class StixFonts(UnicodeFonts):
"""
A font handling class for the STIX fonts.
In addition to what UnicodeFonts provides, this class:
- supports "virtual fonts" which are complete alpha numeric
character sets with different font styles at special Unicode
code points, such as "Blackboard".
- handles sized alternative characters for the STIXSizeX fonts.
"""
_fontmap = { 'rm' : 'STIXGeneral',
'it' : 'STIXGeneral:italic',
'bf' : 'STIXGeneral:weight=bold',
'nonunirm' : 'STIXNonUnicode',
'nonuniit' : 'STIXNonUnicode:italic',
'nonunibf' : 'STIXNonUnicode:weight=bold',
0 : 'STIXGeneral',
1 : 'STIXSizeOneSym',
2 : 'STIXSizeTwoSym',
3 : 'STIXSizeThreeSym',
4 : 'STIXSizeFourSym',
5 : 'STIXSizeFiveSym'
}
use_cmex = False
cm_fallback = False
_sans = False
def __init__(self, *args, **kwargs):
TruetypeFonts.__init__(self, *args, **kwargs)
self.fontmap = {}
for key, name in six.iteritems(self._fontmap):
fullpath = findfont(name)
self.fontmap[key] = fullpath
self.fontmap[name] = fullpath
def _map_virtual_font(self, fontname, font_class, uniindex):
# Handle these "fonts" that are actually embedded in
# other fonts.
mapping = stix_virtual_fonts.get(fontname)
if (self._sans and mapping is None and
fontname not in ('regular', 'default')):
mapping = stix_virtual_fonts['sf']
doing_sans_conversion = True
else:
doing_sans_conversion = False
if mapping is not None:
if isinstance(mapping, dict):
mapping = mapping.get(font_class, 'rm')
# Binary search for the source glyph
lo = 0
hi = len(mapping)
while lo < hi:
mid = (lo+hi)//2
range = mapping[mid]
if uniindex < range[0]:
hi = mid
elif uniindex <= range[1]:
break
else:
lo = mid + 1
if uniindex >= range[0] and uniindex <= range[1]:
uniindex = uniindex - range[0] + range[3]
fontname = range[2]
elif not doing_sans_conversion:
# This will generate a dummy character
uniindex = 0x1
fontname = rcParams['mathtext.default']
# Handle private use area glyphs
if (fontname in ('it', 'rm', 'bf') and
uniindex >= 0xe000 and uniindex <= 0xf8ff):
fontname = 'nonuni' + fontname
return fontname, uniindex
_size_alternatives = {}
def get_sized_alternatives_for_symbol(self, fontname, sym):
fixes = {'\{': '{', '\}': '}', '\[': '[', '\]': ']'}
sym = fixes.get(sym, sym)
alternatives = self._size_alternatives.get(sym)
if alternatives:
return alternatives
alternatives = []
try:
uniindex = get_unicode_index(sym)
except ValueError:
return [(fontname, sym)]
fix_ups = {
ord('<'): 0x27e8,
ord('>'): 0x27e9 }
uniindex = fix_ups.get(uniindex, uniindex)
for i in range(6):
cached_font = self._get_font(i)
glyphindex = cached_font.charmap.get(uniindex)
if glyphindex is not None:
alternatives.append((i, unichr_safe(uniindex)))
# The largest size of the radical symbol in STIX has incorrect
# metrics that cause it to be disconnected from the stem.
if sym == r'\__sqrt__':
alternatives = alternatives[:-1]
self._size_alternatives[sym] = alternatives
return alternatives
class StixSansFonts(StixFonts):
"""
A font handling class for the STIX fonts (that uses sans-serif
characters by default).
"""
_sans = True
class StandardPsFonts(Fonts):
"""
Use the standard postscript fonts for rendering to backend_ps
Unlike the other font classes, BakomaFont and UnicodeFont, this
one requires the Ps backend.
"""
basepath = os.path.join( get_data_path(), 'fonts', 'afm' )
fontmap = { 'cal' : 'pzcmi8a', # Zapf Chancery
'rm' : 'pncr8a', # New Century Schoolbook
'tt' : 'pcrr8a', # Courier
'it' : 'pncri8a', # New Century Schoolbook Italic
'sf' : 'phvr8a', # Helvetica
'bf' : 'pncb8a', # New Century Schoolbook Bold
None : 'psyr' # Symbol
}
def __init__(self, default_font_prop):
Fonts.__init__(self, default_font_prop, MathtextBackendPs())
self.glyphd = {}
self.fonts = {}
filename = findfont(default_font_prop, fontext='afm',
directory=self.basepath)
if filename is None:
filename = findfont('Helvetica', fontext='afm',
directory=self.basepath)
with open(filename, 'r') as fd:
default_font = AFM(fd)
default_font.fname = filename
self.fonts['default'] = default_font
self.fonts['regular'] = default_font
self.pswriter = six.moves.cStringIO()
def _get_font(self, font):
if font in self.fontmap:
basename = self.fontmap[font]
else:
basename = font
cached_font = self.fonts.get(basename)
if cached_font is None:
fname = os.path.join(self.basepath, basename + ".afm")
with open(fname, 'r') as fd:
cached_font = AFM(fd)
cached_font.fname = fname
self.fonts[basename] = cached_font
self.fonts[cached_font.get_fontname()] = cached_font
return cached_font
def _get_info (self, fontname, font_class, sym, fontsize, dpi):
'load the cmfont, metrics and glyph with caching'
key = fontname, sym, fontsize, dpi
tup = self.glyphd.get(key)
if tup is not None:
return tup
# Only characters in the "Letter" class should really be italicized.
# This class includes greek letters, so we're ok
if (fontname == 'it' and
(len(sym) > 1 or
not unicodedata.category(six.text_type(sym)).startswith("L"))):
fontname = 'rm'
found_symbol = False
if sym in latex_to_standard:
fontname, num = latex_to_standard[sym]
glyph = chr(num)
found_symbol = True
elif len(sym) == 1:
glyph = sym
num = ord(glyph)
found_symbol = True
else:
warn("No TeX to built-in Postscript mapping for '%s'" % sym,
MathTextWarning)
slanted = (fontname == 'it')
font = self._get_font(fontname)
if found_symbol:
try:
symbol_name = font.get_name_char(glyph)
except KeyError:
warn("No glyph in standard Postscript font '%s' for '%s'" %
(font.postscript_name, sym),
MathTextWarning)
found_symbol = False
if not found_symbol:
glyph = sym = '?'
num = ord(glyph)
symbol_name = font.get_name_char(glyph)
offset = 0
scale = 0.001 * fontsize
xmin, ymin, xmax, ymax = [val * scale
for val in font.get_bbox_char(glyph)]
metrics = Bunch(
advance = font.get_width_char(glyph) * scale,
width = font.get_width_char(glyph) * scale,
height = font.get_height_char(glyph) * scale,
xmin = xmin,
xmax = xmax,
ymin = ymin+offset,
ymax = ymax+offset,
# iceberg is the equivalent of TeX's "height"
iceberg = ymax + offset,
slanted = slanted
)
self.glyphd[key] = Bunch(
font = font,
fontsize = fontsize,
postscript_name = font.get_fontname(),
metrics = metrics,
symbol_name = symbol_name,
num = num,
glyph = glyph,
offset = offset
)
return self.glyphd[key]
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
if font1 == font2 and fontsize1 == fontsize2:
info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
font = info1.font
return (font.get_kern_dist(info1.glyph, info2.glyph)
* 0.001 * fontsize1)
return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi)
def get_xheight(self, font, fontsize, dpi):
cached_font = self._get_font(font)
return cached_font.get_xheight() * 0.001 * fontsize
def get_underline_thickness(self, font, fontsize, dpi):
cached_font = self._get_font(font)
return cached_font.get_underline_thickness() * 0.001 * fontsize
##############################################################################
# TeX-LIKE BOX MODEL
# The following is based directly on the document 'woven' from the
# TeX82 source code. This information is also available in printed
# form:
#
# Knuth, Donald E.. 1986. Computers and Typesetting, Volume B:
# TeX: The Program. Addison-Wesley Professional.
#
# The most relevant "chapters" are:
# Data structures for boxes and their friends
# Shipping pages out (Ship class)
# Packaging (hpack and vpack)
# Data structures for math mode
# Subroutines for math mode
# Typesetting math formulas
#
# Many of the docstrings below refer to a numbered "node" in that
# book, e.g., node123
#
# Note that (as TeX) y increases downward, unlike many other parts of
# matplotlib.
# How much text shrinks when going to the next-smallest level. GROW_FACTOR
# must be the inverse of SHRINK_FACTOR.
SHRINK_FACTOR = 0.7
GROW_FACTOR = 1.0 / SHRINK_FACTOR
# The number of different sizes of chars to use, beyond which they will not
# get any smaller
NUM_SIZE_LEVELS = 6
# Percentage of x-height of additional horiz. space after sub/superscripts
SCRIPT_SPACE = 0.2
# Percentage of x-height that sub/superscripts drop below the baseline
SUBDROP = 0.3
# Percentage of x-height that superscripts drop below the baseline
SUP1 = 0.5
# Percentage of x-height that subscripts drop below the baseline
SUB1 = 0.0
# Percentage of x-height that superscripts are offset relative to the subscript
DELTA = 0.18
class MathTextWarning(Warning):
pass
class Node(object):
"""
A node in the TeX box model
"""
def __init__(self):
self.size = 0
def __repr__(self):
return self.__internal_repr__()
def __internal_repr__(self):
return self.__class__.__name__
def get_kerning(self, next):
return 0.0
def shrink(self):
"""
Shrinks one level smaller. There are only three levels of
sizes, after which things will no longer get smaller.
"""
self.size += 1
def grow(self):
"""
Grows one level larger. There is no limit to how big
something can get.
"""
self.size -= 1
def render(self, x, y):
pass
class Box(Node):
"""
Represents any node with a physical location.
"""
def __init__(self, width, height, depth):
Node.__init__(self)
self.width = width
self.height = height
self.depth = depth
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.width *= SHRINK_FACTOR
self.height *= SHRINK_FACTOR
self.depth *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.width *= GROW_FACTOR
self.height *= GROW_FACTOR
self.depth *= GROW_FACTOR
def render(self, x1, y1, x2, y2):
pass
class Vbox(Box):
"""
A box with only height (zero width).
"""
def __init__(self, height, depth):
Box.__init__(self, 0., height, depth)
class Hbox(Box):
"""
A box with only width (zero height and depth).
"""
def __init__(self, width):
Box.__init__(self, width, 0., 0.)
class Char(Node):
"""
Represents a single character. Unlike TeX, the font information
and metrics are stored with each :class:`Char` to make it easier
to lookup the font metrics when needed. Note that TeX boxes have
a width, height, and depth, unlike Type1 and Truetype which use a
full bounding box and an advance in the x-direction. The metrics
must be converted to the TeX way, and the advance (if different
from width) must be converted into a :class:`Kern` node when the
:class:`Char` is added to its parent :class:`Hlist`.
"""
def __init__(self, c, state):
Node.__init__(self)
self.c = c
self.font_output = state.font_output
assert isinstance(state.font, (six.string_types, int))
self.font = state.font
self.font_class = state.font_class
self.fontsize = state.fontsize
self.dpi = state.dpi
# The real width, height and depth will be set during the
# pack phase, after we know the real fontsize
self._update_metrics()
def __internal_repr__(self):
return '`%s`' % self.c
def _update_metrics(self):
metrics = self._metrics = self.font_output.get_metrics(
self.font, self.font_class, self.c, self.fontsize, self.dpi)
if self.c == ' ':
self.width = metrics.advance
else:
self.width = metrics.width
self.height = metrics.iceberg
self.depth = -(metrics.iceberg - metrics.height)
def is_slanted(self):
return self._metrics.slanted
def get_kerning(self, next):
"""
Return the amount of kerning between this and the given
character. Called when characters are strung together into
:class:`Hlist` to create :class:`Kern` nodes.
"""
advance = self._metrics.advance - self.width
kern = 0.
if isinstance(next, Char):
kern = self.font_output.get_kern(
self.font, self.font_class, self.c, self.fontsize,
next.font, next.font_class, next.c, next.fontsize,
self.dpi)
return advance + kern
def render(self, x, y):
"""
Render the character to the canvas
"""
self.font_output.render_glyph(
x, y,
self.font, self.font_class, self.c, self.fontsize, self.dpi)
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.fontsize *= SHRINK_FACTOR
self.width *= SHRINK_FACTOR
self.height *= SHRINK_FACTOR
self.depth *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.fontsize *= GROW_FACTOR
self.width *= GROW_FACTOR
self.height *= GROW_FACTOR
self.depth *= GROW_FACTOR
class Accent(Char):
"""
The font metrics need to be dealt with differently for accents,
since they are already offset correctly from the baseline in
TrueType fonts.
"""
def _update_metrics(self):
metrics = self._metrics = self.font_output.get_metrics(
self.font, self.font_class, self.c, self.fontsize, self.dpi)
self.width = metrics.xmax - metrics.xmin
self.height = metrics.ymax - metrics.ymin
self.depth = 0
def shrink(self):
Char.shrink(self)
self._update_metrics()
def grow(self):
Char.grow(self)
self._update_metrics()
def render(self, x, y):
"""
Render the character to the canvas.
"""
self.font_output.render_glyph(
x - self._metrics.xmin, y + self._metrics.ymin,
self.font, self.font_class, self.c, self.fontsize, self.dpi)
class List(Box):
"""
A list of nodes (either horizontal or vertical).
"""
def __init__(self, elements):
Box.__init__(self, 0., 0., 0.)
self.shift_amount = 0. # An arbitrary offset
self.children = elements # The child nodes of this list
# The following parameters are set in the vpack and hpack functions
self.glue_set = 0. # The glue setting of this list
self.glue_sign = 0 # 0: normal, -1: shrinking, 1: stretching
self.glue_order = 0 # The order of infinity (0 - 3) for the glue
def __repr__(self):
return '[%s <%.02f %.02f %.02f %.02f> %s]' % (
self.__internal_repr__(),
self.width, self.height,
self.depth, self.shift_amount,
' '.join([repr(x) for x in self.children]))
def _determine_order(self, totals):
"""
A helper function to determine the highest order of glue
used by the members of this list. Used by vpack and hpack.
"""
o = 0
for i in range(len(totals) - 1, 0, -1):
if totals[i] != 0.0:
o = i
break
return o
def _set_glue(self, x, sign, totals, error_type):
o = self._determine_order(totals)
self.glue_order = o
self.glue_sign = sign
if totals[o] != 0.:
self.glue_set = x / totals[o]
else:
self.glue_sign = 0
self.glue_ratio = 0.
if o == 0:
if len(self.children):
warn("%s %s: %r" % (error_type, self.__class__.__name__, self),
MathTextWarning)
def shrink(self):
for child in self.children:
child.shrink()
Box.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.shift_amount *= SHRINK_FACTOR
self.glue_set *= SHRINK_FACTOR
def grow(self):
for child in self.children:
child.grow()
Box.grow(self)
self.shift_amount *= GROW_FACTOR
self.glue_set *= GROW_FACTOR
class Hlist(List):
"""
A horizontal list of boxes.
"""
def __init__(self, elements, w=0., m='additional', do_kern=True):
List.__init__(self, elements)
if do_kern:
self.kern()
self.hpack()
def kern(self):
"""
Insert :class:`Kern` nodes between :class:`Char` nodes to set
kerning. The :class:`Char` nodes themselves determine the
amount of kerning they need (in :meth:`~Char.get_kerning`),
and this function just creates the linked list in the correct
way.
"""
new_children = []
num_children = len(self.children)
if num_children:
for i in range(num_children):
elem = self.children[i]
if i < num_children - 1:
next = self.children[i + 1]
else:
next = None
new_children.append(elem)
kerning_distance = elem.get_kerning(next)
if kerning_distance != 0.:
kern = Kern(kerning_distance)
new_children.append(kern)
self.children = new_children
# This is a failed experiment to fake cross-font kerning.
# def get_kerning(self, next):
# if len(self.children) >= 2 and isinstance(self.children[-2], Char):
# if isinstance(next, Char):
# print "CASE A"
# return self.children[-2].get_kerning(next)
# elif isinstance(next, Hlist) and len(next.children) and isinstance(next.children[0], Char):
# print "CASE B"
# result = self.children[-2].get_kerning(next.children[0])
# print result
# return result
# return 0.0
def hpack(self, w=0., m='additional'):
"""
The main duty of :meth:`hpack` is to compute the dimensions of
the resulting boxes, and to adjust the glue if one of those
dimensions is pre-specified. The computed sizes normally
enclose all of the material inside the new box; but some items
may stick out if negative glue is used, if the box is
overfull, or if a ``\\vbox`` includes other boxes that have
been shifted left.
- *w*: specifies a width
- *m*: is either 'exactly' or 'additional'.
Thus, ``hpack(w, 'exactly')`` produces a box whose width is
exactly *w*, while ``hpack(w, 'additional')`` yields a box
whose width is the natural width plus *w*. The default values
produce a box with the natural width.
"""
# I don't know why these get reset in TeX. Shift_amount is pretty
# much useless if we do.
#self.shift_amount = 0.
h = 0.
d = 0.
x = 0.
total_stretch = [0.] * 4
total_shrink = [0.] * 4
for p in self.children:
if isinstance(p, Char):
x += p.width
h = max(h, p.height)
d = max(d, p.depth)
elif isinstance(p, Box):
x += p.width
if not isinf(p.height) and not isinf(p.depth):
s = getattr(p, 'shift_amount', 0.)
h = max(h, p.height - s)
d = max(d, p.depth + s)
elif isinstance(p, Glue):
glue_spec = p.glue_spec
x += glue_spec.width
total_stretch[glue_spec.stretch_order] += glue_spec.stretch
total_shrink[glue_spec.shrink_order] += glue_spec.shrink
elif isinstance(p, Kern):
x += p.width
self.height = h
self.depth = d
if m == 'additional':
w += x
self.width = w
x = w - x
if x == 0.:
self.glue_sign = 0
self.glue_order = 0
self.glue_ratio = 0.
return
if x > 0.:
self._set_glue(x, 1, total_stretch, "Overfull")
else:
self._set_glue(x, -1, total_shrink, "Underfull")
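# Rough usage sketch (not part of the original module): given a valid Parser.State
# instance `state` (defined further below), an Hlist kerns and packs its children
# on construction, e.g.
#     row = Hlist([Char('A', state), Char('V', state)])  # a Kern node may be added between the glyphs
#     row.hpack(10.0, 'additional')                       # repack to natural width plus 10 units
# `state` and the extra 10 units here are placeholders for illustration only.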
class Vlist(List):
"""
A vertical list of boxes.
"""
def __init__(self, elements, h=0., m='additional'):
List.__init__(self, elements)
self.vpack()
def vpack(self, h=0., m='additional', l=float(inf)):
"""
The main duty of :meth:`vpack` is to compute the dimensions of
the resulting boxes, and to adjust the glue if one of those
dimensions is pre-specified.
- *h*: specifies a height
- *m*: is either 'exactly' or 'additional'.
- *l*: a maximum height
Thus, ``vpack(h, 'exactly')`` produces a box whose height is
exactly *h*, while ``vpack(h, 'additional')`` yields a box
whose height is the natural height plus *h*. The default
        values produce a box with the natural height.
"""
# I don't know why these get reset in TeX. Shift_amount is pretty
# much useless if we do.
# self.shift_amount = 0.
w = 0.
d = 0.
x = 0.
total_stretch = [0.] * 4
total_shrink = [0.] * 4
for p in self.children:
if isinstance(p, Box):
x += d + p.height
d = p.depth
if not isinf(p.width):
s = getattr(p, 'shift_amount', 0.)
w = max(w, p.width + s)
elif isinstance(p, Glue):
x += d
d = 0.
glue_spec = p.glue_spec
x += glue_spec.width
total_stretch[glue_spec.stretch_order] += glue_spec.stretch
total_shrink[glue_spec.shrink_order] += glue_spec.shrink
elif isinstance(p, Kern):
x += d + p.width
d = 0.
elif isinstance(p, Char):
raise RuntimeError("Internal mathtext error: Char node found in Vlist.")
self.width = w
if d > l:
x += d - l
self.depth = l
else:
self.depth = d
if m == 'additional':
h += x
self.height = h
x = h - x
if x == 0:
self.glue_sign = 0
self.glue_order = 0
self.glue_ratio = 0.
return
if x > 0.:
self._set_glue(x, 1, total_stretch, "Overfull")
else:
self._set_glue(x, -1, total_shrink, "Underfull")
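# Illustrative note (assumption, not original code): a fraction-like stack is built
# by vpack'ing centered rows around a rule, e.g.
#     stack = Vlist([HCentered([num]), Vbox(0., gap), Hrule(state), Vbox(0., gap), HCentered([den])])
# where `num`, `den`, `gap` and `state` are placeholders; Parser._genfrac() near the
# end of this module constructs essentially this Vlist.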
class Rule(Box):
"""
A :class:`Rule` node stands for a solid black rectangle; it has
*width*, *depth*, and *height* fields just as in an
:class:`Hlist`. However, if any of these dimensions is inf, the
actual value will be determined by running the rule up to the
boundary of the innermost enclosing box. This is called a "running
dimension." The width is never running in an :class:`Hlist`; the
height and depth are never running in a :class:`Vlist`.
"""
def __init__(self, width, height, depth, state):
Box.__init__(self, width, height, depth)
self.font_output = state.font_output
def render(self, x, y, w, h):
self.font_output.render_rect_filled(x, y, x + w, y + h)
class Hrule(Rule):
"""
Convenience class to create a horizontal rule.
"""
def __init__(self, state, thickness=None):
if thickness is None:
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
height = depth = thickness * 0.5
Rule.__init__(self, inf, height, depth, state)
class Vrule(Rule):
"""
Convenience class to create a vertical rule.
"""
def __init__(self, state):
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
Rule.__init__(self, thickness, inf, inf, state)
class Glue(Node):
"""
Most of the information in this object is stored in the underlying
:class:`GlueSpec` class, which is shared between multiple glue objects. (This
is a memory optimization which probably doesn't matter anymore, but it's
easier to stick to what TeX does.)
"""
def __init__(self, glue_type, copy=False):
Node.__init__(self)
self.glue_subtype = 'normal'
if is_string_like(glue_type):
glue_spec = GlueSpec.factory(glue_type)
elif isinstance(glue_type, GlueSpec):
glue_spec = glue_type
else:
            raise ValueError("glue_type must be a glue spec name or instance.")
if copy:
glue_spec = glue_spec.copy()
self.glue_spec = glue_spec
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
if self.glue_spec.width != 0.:
self.glue_spec = self.glue_spec.copy()
self.glue_spec.width *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
if self.glue_spec.width != 0.:
self.glue_spec = self.glue_spec.copy()
self.glue_spec.width *= GROW_FACTOR
class GlueSpec(object):
"""
See :class:`Glue`.
"""
def __init__(self, width=0., stretch=0., stretch_order=0, shrink=0., shrink_order=0):
self.width = width
self.stretch = stretch
self.stretch_order = stretch_order
self.shrink = shrink
self.shrink_order = shrink_order
def copy(self):
return GlueSpec(
self.width,
self.stretch,
self.stretch_order,
self.shrink,
self.shrink_order)
def factory(cls, glue_type):
return cls._types[glue_type]
factory = classmethod(factory)
GlueSpec._types = {
'fil': GlueSpec(0., 1., 1, 0., 0),
'fill': GlueSpec(0., 1., 2, 0., 0),
'filll': GlueSpec(0., 1., 3, 0., 0),
'neg_fil': GlueSpec(0., 0., 0, 1., 1),
'neg_fill': GlueSpec(0., 0., 0, 1., 2),
'neg_filll': GlueSpec(0., 0., 0, 1., 3),
'empty': GlueSpec(0., 0., 0, 0., 0),
'ss': GlueSpec(0., 1., 1, -1., 1)
}
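# Usage sketch (illustrative only): stretchable 'ss' glue on both sides of a box is
# how the HCentered/VCentered helpers below center their content, e.g.
#     centered = Hlist([SsGlue(), some_box, SsGlue()], do_kern=False)
#     centered.hpack(total_width, 'exactly')
# `some_box` and `total_width` are placeholders; the glue absorbs the leftover space
# evenly once the list is packed to the fixed width.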
# Some convenient ways to get common kinds of glue
class Fil(Glue):
def __init__(self):
Glue.__init__(self, 'fil')
class Fill(Glue):
def __init__(self):
Glue.__init__(self, 'fill')
class Filll(Glue):
def __init__(self):
Glue.__init__(self, 'filll')
class NegFil(Glue):
def __init__(self):
Glue.__init__(self, 'neg_fil')
class NegFill(Glue):
def __init__(self):
Glue.__init__(self, 'neg_fill')
class NegFilll(Glue):
def __init__(self):
Glue.__init__(self, 'neg_filll')
class SsGlue(Glue):
def __init__(self):
Glue.__init__(self, 'ss')
class HCentered(Hlist):
"""
A convenience class to create an :class:`Hlist` whose contents are
centered within its enclosing box.
"""
def __init__(self, elements):
Hlist.__init__(self, [SsGlue()] + elements + [SsGlue()],
do_kern=False)
class VCentered(Vlist):
"""
A convenience class to create a :class:`Vlist` whose contents are
centered within its enclosing box.
"""
def __init__(self, elements):
Vlist.__init__(self, [SsGlue()] + elements + [SsGlue()])
class Kern(Node):
"""
A :class:`Kern` node has a width field to specify a (normally
negative) amount of spacing. This spacing correction appears in
horizontal lists between letters like A and V when the font
designer said that it looks better to move them closer together or
further apart. A kern node can also appear in a vertical list,
when its *width* denotes additional spacing in the vertical
direction.
"""
height = 0
depth = 0
def __init__(self, width):
Node.__init__(self)
self.width = width
def __repr__(self):
return "k%.02f" % self.width
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.width *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.width *= GROW_FACTOR
class SubSuperCluster(Hlist):
"""
:class:`SubSuperCluster` is a sort of hack to get around that fact
that this code do a two-pass parse like TeX. This lets us store
enough information in the hlist itself, namely the nucleus, sub-
and super-script, such that if another script follows that needs
to be attached, it can be reconfigured on the fly.
"""
def __init__(self):
self.nucleus = None
self.sub = None
self.super = None
Hlist.__init__(self, [])
class AutoHeightChar(Hlist):
"""
:class:`AutoHeightChar` will create a character as close to the
given height and depth as possible. When using a font with
multiple height versions of some characters (such as the BaKoMa
fonts), the correct glyph will be selected, otherwise this will
always just return a scaled version of the glyph.
"""
def __init__(self, c, height, depth, state, always=False, factor=None):
alternatives = state.font_output.get_sized_alternatives_for_symbol(
state.font, c)
state = state.copy()
target_total = height + depth
for fontname, sym in alternatives:
state.font = fontname
char = Char(sym, state)
if char.height + char.depth >= target_total:
break
if factor is None:
factor = target_total / (char.height + char.depth)
state.fontsize *= factor
char = Char(sym, state)
shift = (depth - char.depth)
Hlist.__init__(self, [char])
self.shift_amount = shift
class AutoWidthChar(Hlist):
"""
:class:`AutoWidthChar` will create a character as close to the
given width as possible. When using a font with multiple width
versions of some characters (such as the BaKoMa fonts), the
correct glyph will be selected, otherwise this will always just
return a scaled version of the glyph.
"""
def __init__(self, c, width, state, always=False, char_class=Char):
alternatives = state.font_output.get_sized_alternatives_for_symbol(
state.font, c)
state = state.copy()
for fontname, sym in alternatives:
state.font = fontname
char = char_class(sym, state)
if char.width >= width:
break
factor = width / char.width
state.fontsize *= factor
char = char_class(sym, state)
Hlist.__init__(self, [char])
self.width = char.width
class Ship(object):
"""
Once the boxes have been set up, this sends them to output. Since
boxes can be inside of boxes inside of boxes, the main work of
:class:`Ship` is done by two mutually recursive routines,
:meth:`hlist_out` and :meth:`vlist_out`, which traverse the
:class:`Hlist` nodes and :class:`Vlist` nodes inside of horizontal
and vertical boxes. The global variables used in TeX to store
state as it processes have become member variables here.
"""
def __call__(self, ox, oy, box):
self.max_push = 0 # Deepest nesting of push commands so far
self.cur_s = 0
self.cur_v = 0.
self.cur_h = 0.
self.off_h = ox
self.off_v = oy + box.height
self.hlist_out(box)
def clamp(value):
if value < -1000000000.:
return -1000000000.
if value > 1000000000.:
return 1000000000.
return value
clamp = staticmethod(clamp)
def hlist_out(self, box):
cur_g = 0
cur_glue = 0.
glue_order = box.glue_order
glue_sign = box.glue_sign
base_line = self.cur_v
left_edge = self.cur_h
self.cur_s += 1
self.max_push = max(self.cur_s, self.max_push)
clamp = self.clamp
for p in box.children:
if isinstance(p, Char):
p.render(self.cur_h + self.off_h, self.cur_v + self.off_v)
self.cur_h += p.width
elif isinstance(p, Kern):
self.cur_h += p.width
elif isinstance(p, List):
# node623
if len(p.children) == 0:
self.cur_h += p.width
else:
edge = self.cur_h
self.cur_v = base_line + p.shift_amount
if isinstance(p, Hlist):
self.hlist_out(p)
else:
# p.vpack(box.height + box.depth, 'exactly')
self.vlist_out(p)
self.cur_h = edge + p.width
self.cur_v = base_line
elif isinstance(p, Box):
# node624
rule_height = p.height
rule_depth = p.depth
rule_width = p.width
if isinf(rule_height):
rule_height = box.height
if isinf(rule_depth):
rule_depth = box.depth
if rule_height > 0 and rule_width > 0:
                    self.cur_v = base_line + rule_depth
p.render(self.cur_h + self.off_h,
self.cur_v + self.off_v,
rule_width, rule_height)
                    self.cur_v = base_line
self.cur_h += rule_width
elif isinstance(p, Glue):
# node625
glue_spec = p.glue_spec
rule_width = glue_spec.width - cur_g
if glue_sign != 0: # normal
if glue_sign == 1: # stretching
if glue_spec.stretch_order == glue_order:
cur_glue += glue_spec.stretch
cur_g = round(clamp(float(box.glue_set) * cur_glue))
elif glue_spec.shrink_order == glue_order:
cur_glue += glue_spec.shrink
cur_g = round(clamp(float(box.glue_set) * cur_glue))
rule_width += cur_g
self.cur_h += rule_width
self.cur_s -= 1
def vlist_out(self, box):
cur_g = 0
cur_glue = 0.
glue_order = box.glue_order
glue_sign = box.glue_sign
self.cur_s += 1
self.max_push = max(self.max_push, self.cur_s)
left_edge = self.cur_h
self.cur_v -= box.height
top_edge = self.cur_v
clamp = self.clamp
for p in box.children:
if isinstance(p, Kern):
self.cur_v += p.width
elif isinstance(p, List):
if len(p.children) == 0:
self.cur_v += p.height + p.depth
else:
self.cur_v += p.height
self.cur_h = left_edge + p.shift_amount
save_v = self.cur_v
p.width = box.width
if isinstance(p, Hlist):
self.hlist_out(p)
else:
self.vlist_out(p)
self.cur_v = save_v + p.depth
self.cur_h = left_edge
elif isinstance(p, Box):
rule_height = p.height
rule_depth = p.depth
rule_width = p.width
if isinf(rule_width):
rule_width = box.width
rule_height += rule_depth
if rule_height > 0 and rule_depth > 0:
self.cur_v += rule_height
p.render(self.cur_h + self.off_h,
self.cur_v + self.off_v,
rule_width, rule_height)
elif isinstance(p, Glue):
glue_spec = p.glue_spec
rule_height = glue_spec.width - cur_g
if glue_sign != 0: # normal
if glue_sign == 1: # stretching
if glue_spec.stretch_order == glue_order:
cur_glue += glue_spec.stretch
cur_g = round(clamp(float(box.glue_set) * cur_glue))
elif glue_spec.shrink_order == glue_order: # shrinking
cur_glue += glue_spec.shrink
cur_g = round(clamp(float(box.glue_set) * cur_glue))
rule_height += cur_g
self.cur_v += rule_height
elif isinstance(p, Char):
raise RuntimeError("Internal mathtext error: Char node found in vlist")
self.cur_s -= 1
ship = Ship()
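# Illustrative call (not original code): rendering a finished layout amounts to
#     ship(ox, oy, box)
# where `box` is the packed root Hlist produced by Parser.parse() below and
# (ox, oy) is the output offset; the rendering backends in this module invoke it
# along these lines.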
##############################################################################
# PARSER
def Error(msg):
"""
Helper class to raise parser errors.
"""
def raise_error(s, loc, toks):
raise ParseFatalException(s, loc, msg)
empty = Empty()
empty.setParseAction(raise_error)
return empty
class Parser(object):
"""
This is the pyparsing-based parser for math expressions. It
actually parses full strings *containing* math expressions, in
that raw text may also appear outside of pairs of ``$``.
The grammar is based directly on that in TeX, though it cuts a few
corners.
"""
_binary_operators = set('''
+ *
\\pm \\sqcap \\rhd
\\mp \\sqcup \\unlhd
\\times \\vee \\unrhd
\\div \\wedge \\oplus
\\ast \\setminus \\ominus
\\star \\wr \\otimes
\\circ \\diamond \\oslash
\\bullet \\bigtriangleup \\odot
\\cdot \\bigtriangledown \\bigcirc
\\cap \\triangleleft \\dagger
\\cup \\triangleright \\ddagger
\\uplus \\lhd \\amalg'''.split())
_relation_symbols = set('''
= < > :
\\leq \\geq \\equiv \\models
\\prec \\succ \\sim \\perp
\\preceq \\succeq \\simeq \\mid
\\ll \\gg \\asymp \\parallel
\\subset \\supset \\approx \\bowtie
\\subseteq \\supseteq \\cong \\Join
\\sqsubset \\sqsupset \\neq \\smile
\\sqsubseteq \\sqsupseteq \\doteq \\frown
\\in \\ni \\propto
\\vdash \\dashv \\dots'''.split())
_arrow_symbols = set('''
\\leftarrow \\longleftarrow \\uparrow
\\Leftarrow \\Longleftarrow \\Uparrow
\\rightarrow \\longrightarrow \\downarrow
\\Rightarrow \\Longrightarrow \\Downarrow
\\leftrightarrow \\longleftrightarrow \\updownarrow
\\Leftrightarrow \\Longleftrightarrow \\Updownarrow
\\mapsto \\longmapsto \\nearrow
\\hookleftarrow \\hookrightarrow \\searrow
\\leftharpoonup \\rightharpoonup \\swarrow
\\leftharpoondown \\rightharpoondown \\nwarrow
\\rightleftharpoons \\leadsto'''.split())
_spaced_symbols = _binary_operators | _relation_symbols | _arrow_symbols
_punctuation_symbols = set(r', ; . ! \ldotp \cdotp'.split())
_overunder_symbols = set(r'''
\sum \prod \coprod \bigcap \bigcup \bigsqcup \bigvee
\bigwedge \bigodot \bigotimes \bigoplus \biguplus
'''.split())
_overunder_functions = set(
r"lim liminf limsup sup max min".split())
_dropsub_symbols = set(r'''\int \oint'''.split())
_fontnames = set("rm cal it tt sf bf default bb frak circled scr regular".split())
_function_names = set("""
arccos csc ker min arcsin deg lg Pr arctan det lim sec arg dim
liminf sin cos exp limsup sinh cosh gcd ln sup cot hom log tan
coth inf max tanh""".split())
_ambi_delim = set("""
| \\| / \\backslash \\uparrow \\downarrow \\updownarrow \\Uparrow
\\Downarrow \\Updownarrow .""".split())
_left_delim = set(r"( [ \{ < \lfloor \langle \lceil".split())
_right_delim = set(r") ] \} > \rfloor \rangle \rceil".split())
def __init__(self):
p = Bunch()
# All forward declarations are here
p.accent = Forward()
p.ambi_delim = Forward()
p.apostrophe = Forward()
p.auto_delim = Forward()
p.binom = Forward()
p.bslash = Forward()
p.c_over_c = Forward()
p.customspace = Forward()
p.end_group = Forward()
p.float_literal = Forward()
p.font = Forward()
p.frac = Forward()
p.function = Forward()
p.genfrac = Forward()
p.group = Forward()
p.int_literal = Forward()
p.latexfont = Forward()
p.lbracket = Forward()
p.left_delim = Forward()
p.lbrace = Forward()
p.main = Forward()
p.math = Forward()
p.math_string = Forward()
p.non_math = Forward()
p.operatorname = Forward()
p.overline = Forward()
p.placeable = Forward()
p.rbrace = Forward()
p.rbracket = Forward()
p.required_group = Forward()
p.right_delim = Forward()
p.right_delim_safe = Forward()
p.simple = Forward()
p.simple_group = Forward()
p.single_symbol = Forward()
p.space = Forward()
p.sqrt = Forward()
p.stackrel = Forward()
p.start_group = Forward()
p.subsuper = Forward()
p.subsuperop = Forward()
p.symbol = Forward()
p.symbol_name = Forward()
p.token = Forward()
p.unknown_symbol = Forward()
# Set names on everything -- very useful for debugging
for key, val in vars(p).items():
if not key.startswith('_'):
val.setName(key)
p.float_literal <<= Regex(r"[-+]?([0-9]+\.?[0-9]*|\.[0-9]+)")
p.int_literal <<= Regex("[-+]?[0-9]+")
p.lbrace <<= Literal('{').suppress()
p.rbrace <<= Literal('}').suppress()
p.lbracket <<= Literal('[').suppress()
p.rbracket <<= Literal(']').suppress()
p.bslash <<= Literal('\\')
p.space <<= oneOf(list(six.iterkeys(self._space_widths)))
p.customspace <<= (Suppress(Literal(r'\hspace'))
- ((p.lbrace + p.float_literal + p.rbrace)
| Error(r"Expected \hspace{n}")))
unicode_range = "\U00000080-\U0001ffff"
p.single_symbol <<= Regex(r"([a-zA-Z0-9 +\-*/<>=:,.;!\?&'@()\[\]|%s])|(\\[%%${}\[\]_|])" %
unicode_range)
p.symbol_name <<= (Combine(p.bslash + oneOf(list(six.iterkeys(tex2uni)))) +
FollowedBy(Regex("[^A-Za-z]").leaveWhitespace() | StringEnd()))
p.symbol <<= (p.single_symbol | p.symbol_name).leaveWhitespace()
p.apostrophe <<= Regex("'+")
p.c_over_c <<= Suppress(p.bslash) + oneOf(list(six.iterkeys(self._char_over_chars)))
p.accent <<= Group(
Suppress(p.bslash)
+ oneOf(list(six.iterkeys(self._accent_map)) + list(self._wide_accents))
- p.placeable
)
p.function <<= Suppress(p.bslash) + oneOf(list(self._function_names))
p.start_group <<= Optional(p.latexfont) + p.lbrace
p.end_group <<= p.rbrace.copy()
p.simple_group <<= Group(p.lbrace + ZeroOrMore(p.token) + p.rbrace)
p.required_group<<= Group(p.lbrace + OneOrMore(p.token) + p.rbrace)
p.group <<= Group(p.start_group + ZeroOrMore(p.token) + p.end_group)
p.font <<= Suppress(p.bslash) + oneOf(list(self._fontnames))
p.latexfont <<= Suppress(p.bslash) + oneOf(['math' + x for x in self._fontnames])
p.frac <<= Group(
Suppress(Literal(r"\frac"))
- ((p.required_group + p.required_group) | Error(r"Expected \frac{num}{den}"))
)
p.stackrel <<= Group(
Suppress(Literal(r"\stackrel"))
- ((p.required_group + p.required_group) | Error(r"Expected \stackrel{num}{den}"))
)
p.binom <<= Group(
Suppress(Literal(r"\binom"))
- ((p.required_group + p.required_group) | Error(r"Expected \binom{num}{den}"))
)
p.ambi_delim <<= oneOf(list(self._ambi_delim))
p.left_delim <<= oneOf(list(self._left_delim))
p.right_delim <<= oneOf(list(self._right_delim))
p.right_delim_safe <<= oneOf(list(self._right_delim - set(['}'])) + [r'\}'])
p.genfrac <<= Group(
Suppress(Literal(r"\genfrac"))
- (((p.lbrace + Optional(p.ambi_delim | p.left_delim, default='') + p.rbrace)
+ (p.lbrace + Optional(p.ambi_delim | p.right_delim_safe, default='') + p.rbrace)
+ (p.lbrace + p.float_literal + p.rbrace)
+ p.simple_group + p.required_group + p.required_group)
| Error(r"Expected \genfrac{ldelim}{rdelim}{rulesize}{style}{num}{den}"))
)
p.sqrt <<= Group(
Suppress(Literal(r"\sqrt"))
- ((Optional(p.lbracket + p.int_literal + p.rbracket, default=None)
+ p.required_group)
| Error("Expected \sqrt{value}"))
)
p.overline <<= Group(
Suppress(Literal(r"\overline"))
- (p.required_group | Error("Expected \overline{value}"))
)
p.unknown_symbol<<= Combine(p.bslash + Regex("[A-Za-z]*"))
p.operatorname <<= Group(
Suppress(Literal(r"\operatorname"))
- ((p.lbrace + ZeroOrMore(p.simple | p.unknown_symbol) + p.rbrace)
| Error("Expected \operatorname{value}"))
)
p.placeable <<= ( p.accent # Must be first
| p.symbol # Must be second
| p.c_over_c
| p.function
| p.group
| p.frac
| p.stackrel
| p.binom
| p.genfrac
| p.sqrt
| p.overline
| p.operatorname
)
p.simple <<= ( p.space
| p.customspace
| p.font
| p.subsuper
)
p.subsuperop <<= oneOf(["_", "^"])
p.subsuper <<= Group(
(Optional(p.placeable) + OneOrMore(p.subsuperop - p.placeable) + Optional(p.apostrophe))
| (p.placeable + Optional(p.apostrophe))
| p.apostrophe
)
p.token <<= ( p.simple
| p.auto_delim
| p.unknown_symbol # Must be last
)
p.auto_delim <<= (Suppress(Literal(r"\left"))
- ((p.left_delim | p.ambi_delim) | Error("Expected a delimiter"))
+ Group(ZeroOrMore(p.simple | p.auto_delim))
+ Suppress(Literal(r"\right"))
- ((p.right_delim | p.ambi_delim) | Error("Expected a delimiter"))
)
p.math <<= OneOrMore(p.token)
p.math_string <<= QuotedString('$', '\\', unquoteResults=False)
p.non_math <<= Regex(r"(?:(?:\\[$])|[^$])*").leaveWhitespace()
p.main <<= (p.non_math + ZeroOrMore(p.math_string + p.non_math)) + StringEnd()
# Set actions
for key, val in vars(p).items():
if not key.startswith('_'):
if hasattr(self, key):
val.setParseAction(getattr(self, key))
self._expression = p.main
self._math_expression = p.math
def parse(self, s, fonts_object, fontsize, dpi):
"""
Parse expression *s* using the given *fonts_object* for
output, at the given *fontsize* and *dpi*.
Returns the parse tree of :class:`Node` instances.
"""
self._state_stack = [self.State(fonts_object, 'default', 'rm', fontsize, dpi)]
self._em_width_cache = {}
try:
result = self._expression.parseString(s)
except ParseBaseException as err:
raise ValueError("\n".join([
"",
err.line,
" " * (err.column - 1) + "^",
six.text_type(err)]))
self._state_stack = None
self._em_width_cache = {}
self._expression.resetCache()
return result[0]
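    # Illustrative sketch (not original code): with a Fonts instance `fonts`, the
    # parser is driven as
    #     box = Parser().parse(r'$x^2 + \alpha$', fonts, fontsize=12, dpi=72)
    # and returns the root Hlist of the box model defined above;
    # MathTextParser.parse() at the bottom of this module wraps this call.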
# The state of the parser is maintained in a stack. Upon
# entering and leaving a group { } or math/non-math, the stack
# is pushed and popped accordingly. The current state always
# exists in the top element of the stack.
class State(object):
"""
Stores the state of the parser.
States are pushed and popped from a stack as necessary, and
the "current" state is always at the top of the stack.
"""
def __init__(self, font_output, font, font_class, fontsize, dpi):
self.font_output = font_output
self._font = font
self.font_class = font_class
self.fontsize = fontsize
self.dpi = dpi
def copy(self):
return Parser.State(
self.font_output,
self.font,
self.font_class,
self.fontsize,
self.dpi)
def _get_font(self):
return self._font
def _set_font(self, name):
if name in ('rm', 'it', 'bf'):
self.font_class = name
self._font = name
font = property(_get_font, _set_font)
def get_state(self):
"""
Get the current :class:`State` of the parser.
"""
return self._state_stack[-1]
def pop_state(self):
"""
Pop a :class:`State` off of the stack.
"""
self._state_stack.pop()
def push_state(self):
"""
Push a new :class:`State` onto the stack which is just a copy
of the current state.
"""
self._state_stack.append(self.get_state().copy())
def main(self, s, loc, toks):
#~ print "finish", toks
return [Hlist(toks)]
def math_string(self, s, loc, toks):
# print "math_string", toks[0][1:-1]
return self._math_expression.parseString(toks[0][1:-1])
def math(self, s, loc, toks):
#~ print "math", toks
hlist = Hlist(toks)
self.pop_state()
return [hlist]
def non_math(self, s, loc, toks):
#~ print "non_math", toks
s = toks[0].replace(r'\$', '$')
symbols = [Char(c, self.get_state()) for c in s]
hlist = Hlist(symbols)
# We're going into math now, so set font to 'it'
self.push_state()
self.get_state().font = rcParams['mathtext.default']
return [hlist]
def _make_space(self, percentage):
# All spaces are relative to em width
state = self.get_state()
key = (state.font, state.fontsize, state.dpi)
width = self._em_width_cache.get(key)
if width is None:
metrics = state.font_output.get_metrics(
state.font, rcParams['mathtext.default'], 'm', state.fontsize, state.dpi)
width = metrics.advance
self._em_width_cache[key] = width
return Kern(width * percentage)
_space_widths = { r'\ ' : 0.3,
r'\,' : 0.4,
r'\;' : 0.8,
r'\quad' : 1.6,
r'\qquad' : 3.2,
r'\!' : -0.4,
r'\/' : 0.4 }
def space(self, s, loc, toks):
assert(len(toks)==1)
num = self._space_widths[toks[0]]
box = self._make_space(num)
return [box]
def customspace(self, s, loc, toks):
return [self._make_space(float(toks[0]))]
def symbol(self, s, loc, toks):
# print "symbol", toks
c = toks[0]
try:
char = Char(c, self.get_state())
except ValueError:
raise ParseFatalException(s, loc, "Unknown symbol: %s" % c)
if c in self._spaced_symbols:
return [Hlist( [self._make_space(0.2),
char,
self._make_space(0.2)] ,
do_kern = False)]
elif c in self._punctuation_symbols:
return [Hlist( [char,
self._make_space(0.2)] ,
do_kern = False)]
return [char]
def unknown_symbol(self, s, loc, toks):
# print "symbol", toks
c = toks[0]
raise ParseFatalException(s, loc, "Unknown symbol: %s" % c)
_char_over_chars = {
        # The first 2 entries in the tuple are (font, char, sizescale) for
# the two symbols under and over. The third element is the space
# (in multiples of underline height)
r'AA' : ( ('rm', 'A', 1.0), (None, '\circ', 0.5), 0.0),
}
def c_over_c(self, s, loc, toks):
sym = toks[0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
under_desc, over_desc, space = \
self._char_over_chars.get(sym, (None, None, 0.0))
if under_desc is None:
raise ParseFatalException("Error parsing symbol")
over_state = state.copy()
if over_desc[0] is not None:
over_state.font = over_desc[0]
over_state.fontsize *= over_desc[2]
over = Accent(over_desc[1], over_state)
under_state = state.copy()
if under_desc[0] is not None:
under_state.font = under_desc[0]
under_state.fontsize *= under_desc[2]
under = Char(under_desc[1], under_state)
width = max(over.width, under.width)
over_centered = HCentered([over])
over_centered.hpack(width, 'exactly')
under_centered = HCentered([under])
under_centered.hpack(width, 'exactly')
return Vlist([
over_centered,
Vbox(0., thickness * space),
under_centered
])
_accent_map = {
r'hat' : r'\circumflexaccent',
r'breve' : r'\combiningbreve',
r'bar' : r'\combiningoverline',
r'grave' : r'\combininggraveaccent',
r'acute' : r'\combiningacuteaccent',
r'ddot' : r'\combiningdiaeresis',
r'tilde' : r'\combiningtilde',
r'dot' : r'\combiningdotabove',
r'vec' : r'\combiningrightarrowabove',
r'"' : r'\combiningdiaeresis',
r"`" : r'\combininggraveaccent',
r"'" : r'\combiningacuteaccent',
r'~' : r'\combiningtilde',
r'.' : r'\combiningdotabove',
r'^' : r'\circumflexaccent',
r'overrightarrow' : r'\rightarrow',
r'overleftarrow' : r'\leftarrow'
}
_wide_accents = set(r"widehat widetilde widebar".split())
def accent(self, s, loc, toks):
assert(len(toks)==1)
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
if len(toks[0]) != 2:
raise ParseFatalException("Error parsing accent")
accent, sym = toks[0]
if accent in self._wide_accents:
accent = AutoWidthChar(
'\\' + accent, sym.width, state, char_class=Accent)
else:
accent = Accent(self._accent_map[accent], state)
centered = HCentered([accent])
centered.hpack(sym.width, 'exactly')
return Vlist([
centered,
Vbox(0., thickness * 2.0),
Hlist([sym])
])
def function(self, s, loc, toks):
#~ print "function", toks
self.push_state()
state = self.get_state()
state.font = 'rm'
hlist = Hlist([Char(c, state) for c in toks[0]])
self.pop_state()
hlist.function_name = toks[0]
return hlist
def operatorname(self, s, loc, toks):
self.push_state()
state = self.get_state()
state.font = 'rm'
# Change the font of Chars, but leave Kerns alone
for c in toks[0]:
if isinstance(c, Char):
c.font = 'rm'
c._update_metrics()
self.pop_state()
return Hlist(toks[0])
def start_group(self, s, loc, toks):
self.push_state()
# Deal with LaTeX-style font tokens
if len(toks):
self.get_state().font = toks[0][4:]
return []
def group(self, s, loc, toks):
grp = Hlist(toks[0])
return [grp]
required_group = simple_group = group
def end_group(self, s, loc, toks):
self.pop_state()
return []
def font(self, s, loc, toks):
assert(len(toks)==1)
name = toks[0]
self.get_state().font = name
return []
def is_overunder(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.c in self._overunder_symbols
elif isinstance(nucleus, Hlist) and hasattr(nucleus, 'function_name'):
return nucleus.function_name in self._overunder_functions
return False
def is_dropsub(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.c in self._dropsub_symbols
return False
def is_slanted(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.is_slanted()
return False
def subsuper(self, s, loc, toks):
assert(len(toks)==1)
# print 'subsuper', toks
nucleus = None
sub = None
super = None
# Pick all of the apostrophe's out
napostrophes = 0
new_toks = []
for tok in toks[0]:
if isinstance(tok, six.string_types) and tok not in ('^', '_'):
napostrophes += len(tok)
else:
new_toks.append(tok)
toks = new_toks
if len(toks) == 0:
assert napostrophes
nucleus = Hbox(0.0)
elif len(toks) == 1:
if not napostrophes:
return toks[0] # .asList()
else:
nucleus = toks[0]
elif len(toks) == 2:
op, next = toks
nucleus = Hbox(0.0)
if op == '_':
sub = next
else:
super = next
elif len(toks) == 3:
nucleus, op, next = toks
if op == '_':
sub = next
else:
super = next
elif len(toks) == 5:
nucleus, op1, next1, op2, next2 = toks
if op1 == op2:
if op1 == '_':
raise ParseFatalException("Double subscript")
else:
raise ParseFatalException("Double superscript")
if op1 == '_':
sub = next1
super = next2
else:
super = next1
sub = next2
else:
raise ParseFatalException(
"Subscript/superscript sequence is too long. "
"Use braces { } to remove ambiguity.")
state = self.get_state()
rule_thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
xHeight = state.font_output.get_xheight(
state.font, state.fontsize, state.dpi)
if napostrophes:
if super is None:
super = Hlist([])
for i in range(napostrophes):
super.children.extend(self.symbol(s, loc, ['\prime']))
# Handle over/under symbols, such as sum or integral
if self.is_overunder(nucleus):
vlist = []
shift = 0.
width = nucleus.width
if super is not None:
super.shrink()
width = max(width, super.width)
if sub is not None:
sub.shrink()
width = max(width, sub.width)
if super is not None:
hlist = HCentered([super])
hlist.hpack(width, 'exactly')
vlist.extend([hlist, Kern(rule_thickness * 3.0)])
hlist = HCentered([nucleus])
hlist.hpack(width, 'exactly')
vlist.append(hlist)
if sub is not None:
hlist = HCentered([sub])
hlist.hpack(width, 'exactly')
vlist.extend([Kern(rule_thickness * 3.0), hlist])
shift = hlist.height
vlist = Vlist(vlist)
vlist.shift_amount = shift + nucleus.depth
result = Hlist([vlist])
return [result]
# Handle regular sub/superscripts
shift_up = nucleus.height - SUBDROP * xHeight
if self.is_dropsub(nucleus):
shift_down = nucleus.depth + SUBDROP * xHeight
else:
shift_down = SUBDROP * xHeight
if super is None:
# node757
sub.shrink()
x = Hlist([sub])
# x.width += SCRIPT_SPACE * xHeight
shift_down = max(shift_down, SUB1)
clr = x.height - (abs(xHeight * 4.0) / 5.0)
shift_down = max(shift_down, clr)
x.shift_amount = shift_down
else:
super.shrink()
x = Hlist([super, Kern(SCRIPT_SPACE * xHeight)])
# x.width += SCRIPT_SPACE * xHeight
clr = SUP1 * xHeight
shift_up = max(shift_up, clr)
clr = x.depth + (abs(xHeight) / 4.0)
shift_up = max(shift_up, clr)
if sub is None:
x.shift_amount = -shift_up
else: # Both sub and superscript
sub.shrink()
y = Hlist([sub])
# y.width += SCRIPT_SPACE * xHeight
shift_down = max(shift_down, SUB1 * xHeight)
clr = (2.0 * rule_thickness -
((shift_up - x.depth) - (y.height - shift_down)))
if clr > 0.:
shift_up += clr
shift_down += clr
if self.is_slanted(nucleus):
x.shift_amount = DELTA * (shift_up + shift_down)
x = Vlist([x,
Kern((shift_up - x.depth) - (y.height - shift_down)),
y])
x.shift_amount = shift_down
result = Hlist([nucleus, x])
return [result]
def _genfrac(self, ldelim, rdelim, rule, style, num, den):
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
rule = float(rule)
num.shrink()
den.shrink()
cnum = HCentered([num])
cden = HCentered([den])
width = max(num.width, den.width)
cnum.hpack(width, 'exactly')
cden.hpack(width, 'exactly')
vlist = Vlist([cnum, # numerator
Vbox(0, thickness * 2.0), # space
Hrule(state, rule), # rule
Vbox(0, thickness * 2.0), # space
cden # denominator
])
# Shift so the fraction line sits in the middle of the
# equals sign
metrics = state.font_output.get_metrics(
state.font, rcParams['mathtext.default'],
'=', state.fontsize, state.dpi)
shift = (cden.height -
((metrics.ymax + metrics.ymin) / 2 -
thickness * 3.0))
vlist.shift_amount = shift
result = [Hlist([vlist, Hbox(thickness * 2.)])]
if ldelim or rdelim:
if ldelim == '':
ldelim = '.'
if rdelim == '':
rdelim = '.'
return self._auto_sized_delimiter(ldelim, result, rdelim)
return result
def genfrac(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==6)
return self._genfrac(*tuple(toks[0]))
def frac(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==2)
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
num, den = toks[0]
return self._genfrac('', '', thickness, '', num, den)
def stackrel(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==2)
num, den = toks[0]
return self._genfrac('', '', 0.0, '', num, den)
def binom(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==2)
num, den = toks[0]
return self._genfrac('(', ')', 0.0, '', num, den)
def sqrt(self, s, loc, toks):
#~ print "sqrt", toks
root, body = toks[0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
# Determine the height of the body, and add a little extra to
# the height so it doesn't seem cramped
height = body.height - body.shift_amount + thickness * 5.0
depth = body.depth + body.shift_amount
check = AutoHeightChar(r'\__sqrt__', height, depth, state, always=True)
height = check.height - check.shift_amount
depth = check.depth + check.shift_amount
# Put a little extra space to the left and right of the body
padded_body = Hlist([Hbox(thickness * 2.0),
body,
Hbox(thickness * 2.0)])
rightside = Vlist([Hrule(state),
Fill(),
padded_body])
# Stretch the glue between the hrule and the body
rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
'exactly', depth)
# Add the root and shift it upward so it is above the tick.
# The value of 0.6 is a hard-coded hack ;)
if root is None:
root = Box(check.width * 0.5, 0., 0.)
else:
root = Hlist([Char(x, state) for x in root])
root.shrink()
root.shrink()
root_vlist = Vlist([Hlist([root])])
root_vlist.shift_amount = -height * 0.6
hlist = Hlist([root_vlist, # Root
# Negative kerning to put root over tick
Kern(-check.width * 0.5),
check, # Check
rightside]) # Body
return [hlist]
def overline(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==1)
body = toks[0][0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
height = body.height - body.shift_amount + thickness * 3.0
depth = body.depth + body.shift_amount
# Place overline above body
rightside = Vlist([Hrule(state),
Fill(),
Hlist([body])])
# Stretch the glue between the hrule and the body
rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
'exactly', depth)
hlist = Hlist([rightside])
return [hlist]
def _auto_sized_delimiter(self, front, middle, back):
state = self.get_state()
if len(middle):
height = max([x.height for x in middle])
depth = max([x.depth for x in middle])
factor = None
else:
height = 0
depth = 0
factor = 1.0
parts = []
# \left. and \right. aren't supposed to produce any symbols
if front != '.':
parts.append(AutoHeightChar(front, height, depth, state, factor=factor))
parts.extend(middle)
if back != '.':
parts.append(AutoHeightChar(back, height, depth, state, factor=factor))
hlist = Hlist(parts)
return hlist
def auto_delim(self, s, loc, toks):
#~ print "auto_delim", toks
front, middle, back = toks
return self._auto_sized_delimiter(front, middle.asList(), back)
###
##############################################################################
# MAIN
class MathTextParser(object):
_parser = None
_backend_mapping = {
'bitmap': MathtextBackendBitmap,
'agg' : MathtextBackendAgg,
'ps' : MathtextBackendPs,
'pdf' : MathtextBackendPdf,
'svg' : MathtextBackendSvg,
'path' : MathtextBackendPath,
'cairo' : MathtextBackendCairo,
'macosx': MathtextBackendAgg,
}
_font_type_mapping = {
'cm' : BakomaFonts,
'stix' : StixFonts,
'stixsans' : StixSansFonts,
'custom' : UnicodeFonts
}
def __init__(self, output):
"""
Create a MathTextParser for the given backend *output*.
"""
self._output = output.lower()
self._cache = maxdict(50)
def parse(self, s, dpi = 72, prop = None):
"""
Parse the given math expression *s* at the given *dpi*. If
*prop* is provided, it is a
:class:`~matplotlib.font_manager.FontProperties` object
specifying the "default" font to use in the math expression,
used for all non-math text.
The results are cached, so multiple calls to :meth:`parse`
with the same expression should be fast.
"""
# There is a bug in Python 3.x where it leaks frame references,
# and therefore can't handle this caching
if prop is None:
prop = FontProperties()
cacheKey = (s, dpi, hash(prop))
result = self._cache.get(cacheKey)
if result is not None:
return result
if self._output == 'ps' and rcParams['ps.useafm']:
font_output = StandardPsFonts(prop)
else:
backend = self._backend_mapping[self._output]()
fontset = rcParams['mathtext.fontset']
fontset_class = self._font_type_mapping.get(fontset.lower())
if fontset_class is not None:
font_output = fontset_class(prop, backend)
else:
raise ValueError(
"mathtext.fontset must be either 'cm', 'stix', "
"'stixsans', or 'custom'")
fontsize = prop.get_size_in_points()
# This is a class variable so we don't rebuild the parser
# with each request.
if self._parser is None:
self.__class__._parser = Parser()
box = self._parser.parse(s, font_output, fontsize, dpi)
font_output.set_canvas_size(box.width, box.height, box.depth)
result = font_output.get_results(box)
self._cache[cacheKey] = result
return result
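    # Example (illustrative): for the bitmap backend the public API can be used as
    #     parser = MathTextParser('bitmap')
    #     ftimage, depth = parser.parse(r'$\frac{a}{b}$', dpi=100)
    # although the convenience wrappers below (to_mask, to_rgba, to_png, get_depth)
    # are the more common entry points.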
def to_mask(self, texstr, dpi=120, fontsize=14):
"""
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns a tuple (*array*, *depth*)
- *array* is an NxM uint8 alpha ubyte mask array of
rasterized tex.
- depth is the offset of the baseline from the bottom of the
image in pixels.
"""
assert(self._output=="bitmap")
prop = FontProperties(size=fontsize)
ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
x = ftimage.as_array()
return x, depth
def to_rgba(self, texstr, color='black', dpi=120, fontsize=14):
"""
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*color*
Any matplotlib color argument
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns a tuple (*array*, *depth*)
- *array* is an NxM uint8 alpha ubyte mask array of
rasterized tex.
- depth is the offset of the baseline from the bottom of the
image in pixels.
"""
x, depth = self.to_mask(texstr, dpi=dpi, fontsize=fontsize)
r, g, b = mcolors.colorConverter.to_rgb(color)
RGBA = np.zeros((x.shape[0], x.shape[1], 4), dtype=np.uint8)
RGBA[:,:,0] = int(255*r)
RGBA[:,:,1] = int(255*g)
RGBA[:,:,2] = int(255*b)
RGBA[:,:,3] = x
return RGBA, depth
def to_png(self, filename, texstr, color='black', dpi=120, fontsize=14):
"""
Writes a tex expression to a PNG file.
Returns the offset of the baseline from the bottom of the
image in pixels.
*filename*
A writable filename or fileobject
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*color*
A valid matplotlib color argument
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns the offset of the baseline from the bottom of the
image in pixels.
"""
rgba, depth = self.to_rgba(texstr, color=color, dpi=dpi, fontsize=fontsize)
numrows, numcols, tmp = rgba.shape
_png.write_png(rgba.tostring(), numcols, numrows, filename)
return depth
def get_depth(self, texstr, dpi=120, fontsize=14):
"""
Returns the offset of the baseline from the bottom of the
image in pixels.
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
"""
assert(self._output=="bitmap")
prop = FontProperties(size=fontsize)
ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
return depth
def math_to_image(s, filename_or_obj, prop=None, dpi=None, format=None):
"""
Given a math expression, renders it in a closely-clipped bounding
box to an image file.
*s*
A math expression. The math portion should be enclosed in
dollar signs.
*filename_or_obj*
A filepath or writable file-like object to write the image data
to.
*prop*
If provided, a FontProperties() object describing the size and
style of the text.
*dpi*
Override the output dpi, otherwise use the default associated
with the output format.
*format*
The output format, e.g., 'svg', 'pdf', 'ps' or 'png'. If not
provided, will be deduced from the filename.
"""
from matplotlib import figure
# backend_agg supports all of the core output formats
from matplotlib.backends import backend_agg
if prop is None:
prop = FontProperties()
parser = MathTextParser('path')
width, height, depth, _, _ = parser.parse(s, dpi=72, prop=prop)
fig = figure.Figure(figsize=(width / 72.0, height / 72.0))
fig.text(0, depth/height, s, fontproperties=prop)
backend_agg.FigureCanvasAgg(fig)
fig.savefig(filename_or_obj, dpi=dpi, format=format)
return depth
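# Example (illustrative; the file name and dpi below are arbitrary placeholders):
#     math_to_image(r'Euler: $e^{i\pi} + 1 = 0$', 'euler.png', dpi=120, format='png')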
| mit | 6,743,337,852,527,449,000 | 34.235275 | 117 | 0.521474 | false |
ema/conpaas | conpaas-services/src/conpaas/core/clouds/opennebula.py | 1 | 5532 | # -*- coding: utf-8 -*-
"""
conpaas.core.clouds.opennebula
==============================
ConPaaS core: OpenNebula IaaS code.
:copyright: (C) 2010-2013 by Contrail Consortium.
"""
import urlparse
from ConfigParser import NoOptionError
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.base import NodeImage
from .base import Cloud
DEFAULT_API_VERSION = '2.2'
class OpenNebulaCloud(Cloud):
def __init__(self, cloud_name, iaas_config):
Cloud.__init__(self, cloud_name)
# required parameters to describe this cloud
cloud_params = ['URL', 'USER', 'PASSWORD',
'IMAGE_ID', 'INST_TYPE',
'NET_ID', 'NET_GATEWAY',
'NET_NETMASK', 'NET_NAMESERVER',
'OS_ARCH',
'OS_ROOT',
'DISK_TARGET',
'CONTEXT_TARGET']
self._check_cloud_params(iaas_config, cloud_params)
def _get(param):
return iaas_config.get(cloud_name, param)
self.url = _get('URL')
self.user = _get('USER')
self.passwd = _get('PASSWORD')
self.img_id = _get('IMAGE_ID')
self.inst_type = _get('INST_TYPE')
self.net_id = _get('NET_ID')
self.net_gw = _get('NET_GATEWAY')
self.net_nm = _get('NET_NETMASK')
self.net_ns = _get('NET_NAMESERVER')
self.os_arch = _get('OS_ARCH')
self.os_root = _get('OS_ROOT')
self.disk_target = _get('DISK_TARGET')
self.context_target = _get('CONTEXT_TARGET')
try:
self.api_version = _get('OPENNEBULA_VERSION')
except NoOptionError:
self.api_version = DEFAULT_API_VERSION
self.cpu = None
self.mem = None
self.logger.info('OpenNebula cloud ready. API_VERSION=%s' %
self.api_version)
def get_cloud_type(self):
return 'opennebula'
def _connect(self):
"""connect to opennebula cloud"""
parsed = urlparse.urlparse(self.url)
ONDriver = get_driver(Provider.OPENNEBULA)
self.driver = ONDriver(self.user,
secret=self.passwd,
secure=(parsed.scheme == 'https'),
host=parsed.hostname,
port=parsed.port,
api_version=self.api_version)
self.connected = True
def get_context(self):
cx = Cloud.get_context(self)
return cx.encode('hex')
def config(self, config_params={}, context=None):
if 'inst_type' in config_params:
self.inst_type = config_params['inst_type']
if 'cpu' in config_params:
self.cpu = config_params['cpu']
if 'mem' in config_params:
self.mem = config_params['mem']
if context is not None:
self._context = context
def list_vms(self):
return Cloud.list_vms(self, False)
def list_instace_types(self):
return self.inst_types
def new_instances(self, count, name='conpaas', inst_type=None):
        '''Asks the provider for new instances.
        @param count: number of instances to request
        @param name: name given to the new instances (optional)
        @param inst_type: name of the instance type (size) to use (optional)
        '''
if self.connected is False:
self._connect()
kwargs = {}
# 'NAME'
kwargs['name'] = name
# 'INSTANCE_TYPE'
if inst_type is None:
inst_type = self.inst_type
# available sizes
self.logger.debug('new_instances: calling self.driver.list_sizes')
sizes = self.driver.list_sizes()
self.logger.debug('new_instances: self.driver.list_sizes returned %s' %
sizes)
# available size names
size_names = [ size.name for size in sizes ]
try:
# index of the size we want
size_idx = size_names.index(inst_type)
except ValueError:
# size not found
raise Exception("Requested size not found. '%s' not in %s" % (
inst_type, size_names))
kwargs['size'] = sizes[size_idx]
# 'CPU'
if self.cpu is not None:
kwargs['cpu'] = self.cpu
# 'MEM'
if self.mem is not None:
kwargs['mem'] = self.mem
# 'OS'
kwargs['os_arch'] = self.os_arch
kwargs['os_root'] = self.os_root
# 'DISK'
kwargs['image'] = NodeImage(self.img_id, '', None)
kwargs['disk_target'] = self.disk_target
# 'NIC': str(network.id) is how libcloud gets the network ID. Let's
# create an object just like that and pass it in the 'networks' kwarg
class OneNetwork(object):
def __str__(self):
return str(self.id)
network = OneNetwork()
network.id = self.net_id
network.address = None
kwargs['networks'] = network
# 'CONTEXT'
context = {}
context['HOSTNAME'] = '$NAME'
context['IP_PUBLIC'] = '$NIC[IP]'
context['IP_GATEWAY'] = self.net_gw
context['NETMASK'] = self.net_nm
context['NAMESERVER'] = self.net_ns
context['USERDATA'] = self.get_context()
context['TARGET'] = self.context_target
kwargs['context'] = context
        return [self._create_service_nodes(self.driver.create_node(**kwargs),
                                           False) for _ in range(count)]
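    # Rough usage sketch (assumptions, not part of ConPaaS): the class is driven from
    # a ConfigParser section that defines the parameters checked in __init__, e.g.
    #     cfg = ConfigParser()
    #     cfg.add_section('opennebula')
    #     cfg.set('opennebula', 'URL', 'http://one.example.org:2633/RPC2')
    #     ...  # USER, PASSWORD, IMAGE_ID, INST_TYPE, NET_* and the remaining keys
    #     cloud = OpenNebulaCloud('opennebula', cfg)
    #     cloud.config(context='#!/bin/sh\necho contextualized\n')
    #     nodes = cloud.new_instances(2, name='conpaas', inst_type='small')
    # every host name, ID and value above is a placeholder.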
| bsd-3-clause | 3,206,006,294,396,996,600 | 29.065217 | 79 | 0.535973 | false |
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/music21/demos/trecento/largestAmbitus.py | 1 | 2001 | # -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: demos/trecento/largestAmbitus.py
# Purpose: find Trecento/ars nova pieces with large ambitus
#
# Authors: Michael Scott Cuthbert
#
# Copyright: Copyright © 2012 Michael Scott Cuthbert and the music21 Project
# License: LGPL or BSD, see license.txt
#-------------------------------------------------------------------------------
'''
On September 11, 2012, Camilla Cavicchi reported to me the finding of
a new fragment in the Ferrara archives. One unknown piece has an extraordinary
large range in the top voice: a 15th within a few notes. The clefs can't
be read and the piece is an adaptation into
Stroke notation, so it's unlikely to have an exact match in the database
(also the piece is probably from the 1430s [MSC, guess, not CC], so it's
not likely to be in the Trecento database anyhow).
This demo uses the .analyze('ambitus') function of music21 to try
to find a match for the ambitus (or at least narrow down the search for others)
by finding all parts within pieces where the range is at least a 15th.
'''
from music21 import corpus, converter
def main():
trecentoFiles = corpus.getWork('trecento')
for t in trecentoFiles:
print (t)
tparsed = converter.parse(t)
for p in tparsed.parts:
ambi = p.analyze('ambitus')
distance = ambi.diatonic.generic.undirected
if distance >= 15:
print ("************ GOT ONE!: {0} ************".format(ambi))
elif distance >= 9:
print (ambi)
else:
pass
#-------------------------------------------------------------------------------
# define presented order in documentation
_DOC_ORDER = []
if __name__ == "__main__":
main()
#------------------------------------------------------------------------------
# eof
| mit | 5,355,574,308,488,821,000 | 37.215686 | 80 | 0.5285 | false |
zhengjue/mytornado | study/2/filecmp/simple2.py | 1 | 1828 | #!/usr/bin/env python
import os, sys
import filecmp
import re
import shutil
holderlist=[]
def compareme(dir1, dir2):
dircomp=filecmp.dircmp(dir1,dir2)
only_in_one=dircomp.left_only
diff_in_one=dircomp.diff_files
dirpath=os.path.abspath(dir1)
[holderlist.append(os.path.abspath( os.path.join(dir1,x) )) for x in only_in_one]
[holderlist.append(os.path.abspath( os.path.join(dir1,x) )) for x in diff_in_one]
if len(dircomp.common_dirs) > 0:
for item in dircomp.common_dirs:
compareme(os.path.abspath(os.path.join(dir1,item)), \
os.path.abspath(os.path.join(dir2,item)))
return holderlist
def main():
if len(sys.argv) > 2:
dir1=sys.argv[1]
dir2=sys.argv[2]
else:
print "Usage: ", sys.argv[0], "datadir backupdir"
sys.exit()
source_files=compareme(dir1,dir2)
dir1=os.path.abspath(dir1)
if not dir2.endswith('/'): dir2=dir2+'/'
dir2=os.path.abspath(dir2)
destination_files=[]
createdir_bool=False
for item in source_files:
destination_dir=re.sub(dir1, dir2, item)
destination_files.append(destination_dir)
if os.path.isdir(item):
if not os.path.exists(destination_dir):
os.makedirs(destination_dir)
createdir_bool=True
    if createdir_bool:
        destination_files=[]
        source_files=[]
        # clear the shared module-level list so the re-scan below does not
        # return the entries collected by the first pass as duplicates
        del holderlist[:]
        source_files=compareme(dir1,dir2)
for item in source_files:
destination_dir=re.sub(dir1, dir2, item)
destination_files.append(destination_dir)
print "update item:"
print source_files
copy_pair=zip(source_files,destination_files)
for item in copy_pair:
if os.path.isfile(item[0]):
shutil.copyfile(item[0], item[1])
if __name__ == '__main__':
main()
| gpl-3.0 | 5,692,140,856,921,574,000 | 28.015873 | 85 | 0.622538 | false |
cjlee112/socraticqs2 | mysite/pages/migrations/0008_listplugin.py | 1 | 1139 | from django.db import models, migrations
import djangocms_text_ckeditor.fields
class Migration(migrations.Migration):
dependencies = [
('cms', '0012_auto_20150607_2207'),
('pages', '0007_activelearningratesplugin'),
]
operations = [
migrations.CreateModel(
name='ListPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='cms.CMSPlugin', on_delete=models.CASCADE)),
('title', models.CharField(max_length=70, blank=True)),
('description_header', djangocms_text_ckeditor.fields.HTMLField(blank=True)),
('list_type', models.CharField(default='list-questions', max_length=20, choices=[('list-questions', 'list-questions')])),
('list_text', djangocms_text_ckeditor.fields.HTMLField()),
('description_footer', djangocms_text_ckeditor.fields.HTMLField(blank=True)),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
| apache-2.0 | 4,625,556,113,305,008,000 | 39.678571 | 174 | 0.592625 | false |
davidam/python-examples | basics/allindices.py | 1 | 1128 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 David Arroyo Menéndez
# Author: David Arroyo Menéndez <[email protected]>
# Maintainer: David Arroyo Menéndez <[email protected]>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
aList = [123, 'xyz', 'zara', 'abc'];
print("Index for xyz : %s" % aList.index( 'xyz' ))
print("Index for zara : %s" % aList.index( 'zara' ))
#all_indices("foo", ["foo","bar","baz","foo"])
| gpl-3.0 | -5,417,548,355,546,397,000 | 40.666667 | 70 | 0.712 | false |
bslatkin/pycon2014 | lib/asyncio-0.4.1/tests/test_selectors.py | 1 | 6463 | """Tests for selectors.py."""
import unittest
import unittest.mock
from asyncio import selectors
class FakeSelector(selectors._BaseSelectorImpl):
"""Trivial non-abstract subclass of BaseSelector."""
def select(self, timeout=None):
raise NotImplementedError
class _SelectorMappingTests(unittest.TestCase):
def test_len(self):
s = FakeSelector()
map = selectors._SelectorMapping(s)
self.assertTrue(map.__len__() == 0)
f = unittest.mock.Mock()
f.fileno.return_value = 10
s.register(f, selectors.EVENT_READ, None)
self.assertTrue(len(map) == 1)
def test_getitem(self):
s = FakeSelector()
map = selectors._SelectorMapping(s)
f = unittest.mock.Mock()
f.fileno.return_value = 10
s.register(f, selectors.EVENT_READ, None)
attended = selectors.SelectorKey(f, 10, selectors.EVENT_READ, None)
self.assertEqual(attended, map.__getitem__(f))
def test_getitem_key_error(self):
s = FakeSelector()
map = selectors._SelectorMapping(s)
self.assertTrue(len(map) == 0)
f = unittest.mock.Mock()
f.fileno.return_value = 10
s.register(f, selectors.EVENT_READ, None)
self.assertRaises(KeyError, map.__getitem__, 5)
def test_iter(self):
s = FakeSelector()
map = selectors._SelectorMapping(s)
self.assertTrue(len(map) == 0)
f = unittest.mock.Mock()
f.fileno.return_value = 5
s.register(f, selectors.EVENT_READ, None)
counter = 0
for fileno in map.__iter__():
self.assertEqual(5, fileno)
counter += 1
for idx in map:
self.assertEqual(f, map[idx].fileobj)
self.assertEqual(1, counter)
class BaseSelectorTests(unittest.TestCase):
def test_fileobj_to_fd(self):
self.assertEqual(10, selectors._fileobj_to_fd(10))
f = unittest.mock.Mock()
f.fileno.return_value = 10
self.assertEqual(10, selectors._fileobj_to_fd(f))
f.fileno.side_effect = AttributeError
self.assertRaises(ValueError, selectors._fileobj_to_fd, f)
f.fileno.return_value = -1
self.assertRaises(ValueError, selectors._fileobj_to_fd, f)
def test_selector_key_repr(self):
key = selectors.SelectorKey(10, 10, selectors.EVENT_READ, None)
self.assertEqual(
"SelectorKey(fileobj=10, fd=10, events=1, data=None)", repr(key))
def test_register(self):
fobj = unittest.mock.Mock()
fobj.fileno.return_value = 10
s = FakeSelector()
key = s.register(fobj, selectors.EVENT_READ)
self.assertIsInstance(key, selectors.SelectorKey)
self.assertEqual(key.fd, 10)
self.assertIs(key, s._fd_to_key[10])
def test_register_unknown_event(self):
s = FakeSelector()
self.assertRaises(ValueError, s.register, unittest.mock.Mock(), 999999)
def test_register_already_registered(self):
fobj = unittest.mock.Mock()
fobj.fileno.return_value = 10
s = FakeSelector()
s.register(fobj, selectors.EVENT_READ)
self.assertRaises(KeyError, s.register, fobj, selectors.EVENT_READ)
def test_unregister(self):
fobj = unittest.mock.Mock()
fobj.fileno.return_value = 10
s = FakeSelector()
s.register(fobj, selectors.EVENT_READ)
s.unregister(fobj)
self.assertFalse(s._fd_to_key)
def test_unregister_unknown(self):
fobj = unittest.mock.Mock()
fobj.fileno.return_value = 10
s = FakeSelector()
self.assertRaises(KeyError, s.unregister, fobj)
def test_modify_unknown(self):
fobj = unittest.mock.Mock()
fobj.fileno.return_value = 10
s = FakeSelector()
self.assertRaises(KeyError, s.modify, fobj, 1)
def test_modify(self):
fobj = unittest.mock.Mock()
fobj.fileno.return_value = 10
s = FakeSelector()
key = s.register(fobj, selectors.EVENT_READ)
key2 = s.modify(fobj, selectors.EVENT_WRITE)
self.assertNotEqual(key.events, key2.events)
self.assertEqual(
selectors.SelectorKey(fobj, 10, selectors.EVENT_WRITE, None),
s.get_key(fobj))
def test_modify_data(self):
fobj = unittest.mock.Mock()
fobj.fileno.return_value = 10
d1 = object()
d2 = object()
s = FakeSelector()
key = s.register(fobj, selectors.EVENT_READ, d1)
key2 = s.modify(fobj, selectors.EVENT_READ, d2)
self.assertEqual(key.events, key2.events)
self.assertNotEqual(key.data, key2.data)
self.assertEqual(
selectors.SelectorKey(fobj, 10, selectors.EVENT_READ, d2),
s.get_key(fobj))
def test_modify_data_use_a_shortcut(self):
fobj = unittest.mock.Mock()
fobj.fileno.return_value = 10
d1 = object()
d2 = object()
s = FakeSelector()
key = s.register(fobj, selectors.EVENT_READ, d1)
s.unregister = unittest.mock.Mock()
s.register = unittest.mock.Mock()
key2 = s.modify(fobj, selectors.EVENT_READ, d2)
self.assertFalse(s.unregister.called)
self.assertFalse(s.register.called)
def test_modify_same(self):
fobj = unittest.mock.Mock()
fobj.fileno.return_value = 10
data = object()
s = FakeSelector()
key = s.register(fobj, selectors.EVENT_READ, data)
key2 = s.modify(fobj, selectors.EVENT_READ, data)
self.assertIs(key, key2)
def test_select(self):
s = FakeSelector()
self.assertRaises(NotImplementedError, s.select)
def test_close(self):
s = FakeSelector()
s.register(1, selectors.EVENT_READ)
s.close()
self.assertFalse(s._fd_to_key)
def test_context_manager(self):
s = FakeSelector()
with s as sel:
sel.register(1, selectors.EVENT_READ)
self.assertFalse(s._fd_to_key)
def test_key_from_fd(self):
s = FakeSelector()
key = s.register(1, selectors.EVENT_READ)
self.assertIs(key, s._key_from_fd(1))
self.assertIsNone(s._key_from_fd(10))
if hasattr(selectors.DefaultSelector, 'fileno'):
def test_fileno(self):
self.assertIsInstance(selectors.DefaultSelector().fileno(), int)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 1,686,680,130,838,674,400 | 29.200935 | 79 | 0.60916 | false |
Macainian/BaseDjangoProject | website/apps/search_filter_sort/views/class_based/BaseBrowseView.py | 1 | 10170 | import operator
import logging
from functools import reduce
from django.db.models import Q
from django.views.generic import ListView
from django.conf import settings
from website.apps.search_filter_sort.utils.misc import class_strings_to_class, convert_age_to_date
from website.mixins import LoginRequiredMixin
logger = logging.getLogger(__name__)
USER_SEARCH_LIST_DEFAULT = ["username", "first_name", "last_name", "email"]
if hasattr(settings, "USER_SEARCH_LIST"):
USER_SEARCH_LIST = settings.USER_SEARCH_LIST
else:
USER_SEARCH_LIST = USER_SEARCH_LIST_DEFAULT
class BaseBrowseView(LoginRequiredMixin, ListView):
template_name = None
model = None
should_override_pagination = False
searches = []
filters = []
filter_names = []
sorts = []
default_sort_by = []
default_pagination = 25
search_by = None
using_filters = None
def get_context_data(self, **kwargs):
context = super(BaseBrowseView, self).get_context_data(**kwargs)
# check_search_fields()
context["paginate_by"] = self.paginate_by
context["search_by"] = self.search_by
context["filters"] = self.filters
context["filter_names"] = self.filter_names
context["using_filters"] = self.using_filters
context["default_pagination"] = self.default_pagination
return context
def get_queryset(self):
self.searches = self.search_fields(self.model, [])
if not self.should_override_pagination:
try:
self.paginate_by = int(self.request.GET.get("paginate_by", self.default_pagination))
except:
self.paginate_by = self.default_pagination
should_return_empty = self.request.GET.get("__RETURN_EMPTY__", None)
if should_return_empty:
return self.model.objects.none()
search_bys = self.request.GET.get("search_by", None)
filter_names = self.request.GET.getlist("filter_name", None)
filter_values = self.request.GET.getlist("filter_value", None)
sort_bys = self.request.GET.getlist("sort_by", self.default_sort_by)
search_list = self.get_search_list(search_bys)
filter_list = self.get_filter_list(filter_names, filter_values)
sort_list = self.get_sort_list(sort_bys)
# Search, filter, sort
if search_list:
list_of_search_bys_Q = [Q(**{key: value}) for key, value in search_list.items()]
search_reduce = reduce(operator.or_, list_of_search_bys_Q)
else:
search_reduce = None
if filter_list:
list_of_filter_bys_Q = [[Q(**{key: value}) for value in array] for key, array in filter_list.items()]
reduced_filters = []
for array in list_of_filter_bys_Q:
reduced_filters.append(reduce(operator.or_, array))
filter_reduce = reduce(operator.and_, reduced_filters)
self.using_filters = True
else:
filter_reduce = None
self.using_filters = False
if search_reduce and filter_reduce:
queryset = self.model.objects.filter(search_reduce).filter(filter_reduce).distinct().order_by(*sort_list)
elif search_reduce:
queryset = self.model.objects.filter(search_reduce).distinct().order_by(*sort_list)
elif filter_reduce:
queryset = self.model.objects.filter(filter_reduce).distinct().order_by(*sort_list)
else:
queryset = self.model.objects.order_by(*sort_list)
return queryset
def get_search_list(self, search_bys):
# Determine search_list
search_list = {}
if search_bys:
self.search_by = search_bys
search_terms = []
for term in search_bys.split():
search_terms.append(term)
for field in self.searches:
field += "__icontains"
for term in search_terms:
search_list[field] = term
else:
self.search_by = ""
return search_list
def get_filter_list(self, filter_names, filter_values):
# Determine filter_list
filter_list = {}
self.define_filters()
for i in range(len(filter_names)):
filter_name = filter_names[i]
# This is only false if there are more filter_names than filter_values. Should be equal.
if i < len(filter_values):
values = filter_values[i].split(",")
if "__lte_age" in filter_name or "__lt_age" in filter_name:
values = [convert_age_to_date(int(filter_values[i]))]
filter_name = filter_name.replace("__lte_age", "__lte")
filter_name = filter_name.replace("__lt_age", "__lt")
elif "__lte_number" in filter_name or "__lt_number" in filter_name:
filter_name = filter_name.replace("__lte_number", "__lte")
filter_name = filter_name.replace("__lt_number", "__lt")
if "__gte_age" in filter_name or "__gt_age" in filter_name:
values = [convert_age_to_date(int(filter_values[i]) + 1)]
filter_name = filter_name.replace("__gte_age", "__gte")
filter_name = filter_name.replace("__gt_age", "__gt")
elif "__gte_number" in filter_name or "__gt_number" in filter_name:
filter_name = filter_name.replace("__gte_number", "__gte")
filter_name = filter_name.replace("__gt_number", "__gt")
new_values = []
for value in values:
if value == "__NONE_OR_BLANK__":
new_values.append("")
value = None
elif value == "__NONE__":
value = None
elif value == "__BLANK__":
value = ""
elif value == "__TRUE__":
value = True
elif value == "__FALSE__":
value = False
new_values.append(value)
values = new_values
filter_list[filter_name] = values
else:
break
return filter_list
def get_sort_list(self, sort_bys):
# Determine sort_list
sort_list = list(sort_bys)
count = 0
for i in range(len(sort_bys)):
if "-" in sort_bys[i]:
base_sort = sort_bys[i].split("-")[1]
else:
base_sort = sort_bys[i]
if base_sort not in self.sorts:
sort_list.remove(sort_bys[i])
logger.debug("Sort of " + base_sort + " is not in the sorts.")
count -= 1
elif "last_name" in sort_bys[i]: # Special clause for last_names/first_names
sort_list.insert(count, sort_bys[i].replace("last_name", "first_name"))
count += 1
elif base_sort == "birthday": # Special clause for birthday/age. Need to reverse order because it is backwards for some reason.
if sort_bys[i] == "birthday":
sort_list[count] = "-birthday"
else:
sort_list[count] = "birthday"
count += 1
return sort_list
def define_filters(self):
self.filters = []
self.filter_names = []
def add_select_filter(self, html_name, filter_name, html_options_code):
html_code = '<select class="multi-select form-control" id="' + filter_name + '-filter" name="' + filter_name + '_filter" autocomplete="off" multiple>'
html_code += html_options_code + '</select>'
self.filters.append(
{
"filter_name": filter_name,
"html_name": html_name,
"html_code": html_code
})
self.filter_names.append(filter_name)
def add_number_range_filter(self, html_name, lower_filter_name, upper_filter_name, max_width="50px", step_size="1"):
html_code = \
'<input type="number" class="range-filter form-control" id="' + lower_filter_name + '-filter" ' + \
'name="' + lower_filter_name + '" step="' + step_size + '" style="max-width: ' + max_width + '" />' + \
'<b> - </b>' + \
'<input type="number" class="range-filter form-control" id="' + upper_filter_name + '-filter" ' + \
'name="' + upper_filter_name + '" step="' + step_size + '" style="max-width: ' + max_width + '" />'
self.filters.append(
{
"html_name": html_name,
"html_code": html_code
})
self.filter_names.append(lower_filter_name)
self.filter_names.append(upper_filter_name)
def search_fields(self, class_object, list_of_used_classes):
object_search_list = []
if class_object in list_of_used_classes:
return []
else:
list_of_used_classes.append(class_object)
if class_object.__name__ == "User":
search_list = [search_item for search_item in USER_SEARCH_LIST]
else:
object_dependencies = class_object.object_dependencies()
for object_dependency in object_dependencies:
if object_dependency[2] == "User":
object_search_list += [
str(object_dependency[0] + "__{0}").format(search_item) for search_item in USER_SEARCH_LIST
]
else:
other_class_object = class_strings_to_class(object_dependency[1], object_dependency[2])
other_object_search_list = self.search_fields(other_class_object, list_of_used_classes)
object_search_list += [str(object_dependency[0] + "__{0}").format(search_item) for search_item in
other_object_search_list]
search_list = class_object.basic_search_list() + class_object.special_search_list() + object_search_list
return search_list
| mit | -8,668,220,080,105,243,000 | 37.669202 | 158 | 0.550147 | false |
luzhijun/Optimization | cma-es/batchcompute_python_sdk/vp/testWork.py | 1 | 1368 | #!/usr/bin/env python
#encoding: utf-8
import os
import sys
import config as cfg
from math import sqrt
from simple_oss import SimpleOss
import json
TASK_ID = os.environ.get('ALI_DIKU_TASK_ID')
INSTANCE_COUNT = int(os.environ.get('INSTANCE_COUNT'))
INSTANCE_ID = int(os.environ.get('ALI_DIKU_INSTANCE_ID'))
OSS_HOST = os.environ.get('ALI_DIKU_OSS_HOST')
oss_clnt = SimpleOss(cfg.OSS_HOST, cfg.ID, cfg.KEY)
def get_json(filePath,instance_count, instance_id):
json_cfg=oss_clnt.download_str(cfg.BUCKET,cfg.DATA_PATH)
return json.loads(json_cfg)
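# NOTE (added): find_task() below calls get_range(), which is not defined in
# this module as extracted. The helper here is an assumed sketch: it splits
# the range [start, end) evenly across the worker instances so that each
# instance_id receives its own contiguous slice.
def get_range(start, end, instance_count, instance_id):
    step = (end - start) // instance_count
    s = start + step * instance_id
    e = end if instance_id == instance_count - 1 else s + step
    return s, e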
def find_task():
is_prime = lambda x: 0 not in [ x%d for d in range(2, int(sqrt(x))+1)]
s, e = get_range(cfg.DATA_START, cfg.DATA_END, INSTANCE_COUNT, INSTANCE_ID)
f = open('result.txt', 'w')
for num in xrange(s, e):
if is_prime(num):
f.write(str(num) + '\n')
f.close()
oss_clnt.upload(cfg.OSS_BUCKET, 'result.txt', cfg.FIND_OUTPUT_PATH%INSTANCE_ID)
return 0
def count_task():
s = ""
for instance_id in range(INSTANCE_COUNT):
        instance_result = oss_clnt.download_str(cfg.OSS_BUCKET, cfg.FIND_OUTPUT_PATH%instance_id)
s+=instance_result
oss_clnt.upload_str(cfg.OSS_BUCKET, s, cfg.COUNT_OUTPUT_PATH)
def main():
if TASK_ID == 'Find':
find_task()
else:
count_task()
return 0
if __name__ == '__main__':
sys.exit(main()) | apache-2.0 | -6,602,918,301,452,156,000 | 26.938776 | 85 | 0.647661 | false |
boreq/archive_chan | archive_chan/management/commands/archive_chan_update.py | 1 | 2562 | import datetime, sys
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.utils.timezone import utc
from tendo import singleton
from archive_chan.models import Board, Update
from archive_chan.lib.scraper import BoardScraper
from archive_chan.settings import AppSettings
class Command(BaseCommand):
args = ''
help = 'Scraps threads from all active boards. This command should be run periodically to download new threads, posts and images.'
option_list = BaseCommand.option_list + (
make_option(
'--progress',
action="store_true",
dest='progress',
help='Display progress.',
),
)
def handle(self, *args, **options):
# Prevent multiple instances. Apparently fcntl.lockf is very useful and does completely nothing.
me = singleton.SingleInstance()
boards = Board.objects.filter(active=True)
# Show progress?
if options['progress']:
progress = True
else:
progress = False
# Get new data for each board.
for board in boards:
# Info.
processing_start = datetime.datetime.utcnow().replace(tzinfo=utc)
update = Update.objects.create(board=board, start=processing_start, used_threads = AppSettings.get('SCRAPER_THREADS_NUMBER'))
try:
# Actual update.
scraper = BoardScraper(board, progress=progress)
scraper.update()
# Info.
update.status = Update.COMPLETED
except Exception as e:
sys.stderr.write('%s\n' % (e))
finally:
# Info.
try:
if update.status != Update.COMPLETED:
update.status = Update.FAILED
processing_end = datetime.datetime.utcnow().replace(tzinfo=utc)
processing_time = processing_end - processing_start
update.end = processing_end
update = scraper.stats.add_to_record(update, processing_time)
except Exception as e:
sys.stderr.write('%s\n' % (e))
finally:
update.save()
# Everything below is just info.
print('%s Board: %s %s' % (
datetime.datetime.now(),
board,
scraper.stats.get_text(processing_time),
))
| gpl-2.0 | -655,112,818,715,229,200 | 32.272727 | 137 | 0.559329 | false |
maxogden/Locker | Connectors/XMPP/webservice.py | 1 | 2723 | import sys
import json
import logging
from flask import Flask, render_template, request, redirect, url_for
sys.path.append("../../Common/python")
import lockerfs
import client
import util
app = Flask(__name__)
@app.route("/setupAuth")
def setupAuth():
return render_template("setupAuth.html")
@app.route("/save", methods=['POST'])
def saveAuth():
logging.info("Saving auth")
secrets = lockerfs.loadJsonFile("secrets.json")
secrets["jid"] = request.form["jid"]
secrets["password"] = request.form["password"]
lockerfs.saveJsonFile("secrets.json", secrets)
start()
return json.dumps("started")
def start():
logging.info("Starting")
secrets = lockerfs.loadJsonFile("secrets.json")
app.client = client.Client(app.info, jid=secrets["jid"], password=secrets["password"])
if app.client.connect():
app.client.process(threaded=True)
app.started = True
else:
util.die("XMPP connection failed")
@app.route("/")
def index():
if app.started:
return json.dumps({
"/messages" : "All messages received. Filter by: body, from, mucnick, mucroom, to, type, id, subject",
"/statuses" : "All status updates received. Filter by: status, from, show, priority, type, id",
"/roster" : "Current roster (at time of login)"
})
else:
return redirect(url_for("setupAuth"))
def matches_arg(value, arg):
# either a literal match or a range [lo,hi]
if type(arg) is list and len(arg) is 2:
(lo, hi) = arg
return (lo <= value) and (value < hi)
else:
return (value == arg)
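# Hedged illustration (added): the /messages and /statuses routes below decode
# each query-string value with json.loads and pass it to matches_arg, so a
# hypothetical request could filter with either form:
#   /messages?type="chat"      -> literal match on the 'type' field
#   /messages?id=[100, 200]    -> range match, 100 <= id < 200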
@app.route("/messages")
def messages():
messages = app.client.messages
for key, value in request.args.items():
messages = [msg for msg in messages if matches_arg(msg[key], json.loads(value))]
return json.dumps(messages)
@app.route("/statuses")
def statuses():
statuses = app.client.statuses
for key, value in request.args.items():
statuses = [sts for sts in statuses if matches_arg(sts[key], json.loads(value))]
return json.dumps(statuses)
@app.route("/roster")
def roster():
return json.dumps(app.client.fetch_roster())
def runService(info):
app.info = info
app.client = None
app.started = False
secrets = lockerfs.loadJsonFile("secrets.json")
if "jid" in secrets and "password" in secrets:
start()
else:
logging.info("No auth details available")
app.debug = True
app.run(port=app.info["port"], use_reloader=False)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format='%(levelname)-8s %(message)s')
runService({"port": 7474})
| bsd-3-clause | -8,044,905,973,466,973,000 | 28.597826 | 118 | 0.632758 | false |
doingmathwithpython/code | chapter3/solutions/stats.py | 1 | 1385 | '''
stats.py
Python module with functions for calculating common statistical measures
'''
from collections import Counter
def mean(numbers):
s = sum(numbers)
N = len(numbers)
mean = s/N
return mean
def median(numbers):
    # find the number of items
N = len(numbers)
# sort the list in ascending order
numbers = sorted(numbers)
# find the median
if N % 2 == 0:
# if N is even
m1 = N/2
m2 = (N/2) + 1
# convert to integer, match position
m1 = int(m1) - 1
m2 = int(m2) - 1
median = (numbers[m1] + numbers[m2])/2
else:
m = (N+1)/2
# convert to integer, match position
m = int(m) - 1
median = numbers[m]
return median
def mode(numbers):
c = Counter(numbers)
mode = c.most_common(1)
return mode[0][0]
def find_differences(numbers):
m = mean(numbers)
# find the differences from the mean
diff = []
for num in numbers:
diff.append(num-m)
return diff
def variance_sd(numbers):
# find the list of differences
diff = find_differences(numbers)
# find the squared differences
squared_diff = []
for d in diff:
squared_diff.append(d**2)
# find the variance
sum_squared_diff = sum(squared_diff)
variance = sum_squared_diff/len(numbers)
return variance, variance**0.5
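# Hedged usage sketch (added, not part of the original module): a quick
# demonstration of the functions above on a small made-up sample.
if __name__ == '__main__':
    sample = [4, 8, 15, 16, 23, 42, 15]
    print('Mean: {0}'.format(mean(sample)))
    print('Median: {0}'.format(median(sample)))
    print('Mode: {0}'.format(mode(sample)))
    variance, sd = variance_sd(sample)
    print('Variance: {0} Standard deviation: {1}'.format(variance, sd))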
| mit | 209,777,365,508,709,760 | 20.640625 | 72 | 0.59278 | false |
jejimenez/invetronic | templated_docs_adecuated/templatetags/templated_docs_tags.py | 1 | 2274 | # --coding: utf8--
import os.path
from django.db.models.fields.files import ImageFieldFile
from django.utils.safestring import mark_safe
from django.utils.html import escape
from django import template
register = template.Library()
PIXEL_TO_CM = 0.00846666
class ImageNode(template.Node):
def __init__(self, value):
self.value = template.Variable(value)
def render(self, context):
try:
self.value = self.value.resolve(context)
if not isinstance(self.value, ImageFieldFile):
raise template.VariableDoesNotExist(
'Image argument should be an ImageField')
images = context.dicts[0].setdefault('ootemplate_imgs', {})
id = len(images)
z_index = id + 3 # Magic
width = self.value.width * PIXEL_TO_CM
height = self.value.height * PIXEL_TO_CM
filename = os.path.basename(self.value.name)
basename = os.path.splitext(filename)[0]
images[self.value.path] = self.value
img_frame = '<draw:frame draw:style-name="gr%(z_index)s" ' \
'draw:name="%(basename)s" ' \
'draw:id="id%(id)s" ' \
'text:anchor-type="paragraph" svg:width="%(width)fcm" ' \
'svg:height="%(height)fcm" draw:z-index="%(z_index)s">' \
'<draw:image xlink:href="Pictures/%(filename)s" ' \
'xlink:type="simple" xlink:show="embed" ' \
'xlink:actuate="onLoad"/></draw:frame>'
return (img_frame) % locals()
except template.VariableDoesNotExist:
return ''
@register.tag
def image(parser, token):
"""
    Insert an image from an ImageField into a document.
"""
try:
tag_name, value = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError(
            '%r tag requires a file as an argument' % token.contents.split()[0])
return ImageNode(value)
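# Hedged usage note (added): inside an ODT template rendered with this tag
# library, the tag registered above would typically be written as
#     {% image some_object.image_field %}
# where ``some_object.image_field`` is an assumed ImageField template variable.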
@register.filter
def lolinebreaks(value):
"""
LibreOffice-flavored ``linebreaks`` filter.
"""
if not value:
return ''
paragraphs = [line for line in escape(value).splitlines()]
return mark_safe('<text:line-break/>'.join(paragraphs))
| mit | 3,747,650,145,073,412,600 | 31.956522 | 77 | 0.583553 | false |
haxandsnax/AnkhUtils | AnkhUtils/__init__.py | 1 | 4831 | import json
import os
import codecs
import math
#---------------------------------------
# inject decorator inserts the Utils object so it can be
# used directly in your Execute, Init functions etc.
#---------------------------------------
def inject(util):
def injectfn(fn):
def wrapped(*args, **kwargs):
util.SetData(fn.__globals__.get('Parent'), args, kwargs)
fn.__globals__['Utils'] = util
return fn(*args, **kwargs)
return wrapped
return injectfn
#---------------------------------------
# Call this to create the initial Utils object
#---------------------------------------
def setup(script, command):
return UtilClass(script, command)
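# Hedged usage sketch (added, not part of the original module): the script
# name "ExampleScript" and the "!ping" command are illustrative only; they
# show how setup() and inject() are meant to wrap a chatbot callback.
def _example_usage():
    util = setup("ExampleScript", ["!ping"])

    @inject(util)
    def Execute(data):
        # 'Utils' is placed in the function globals by the decorator
        match = Utils.ProcessCommand()
        if match:
            Utils.ChatOrWhisper("pong!")

    return Execute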
#---------------------------------------
# Add functions to this class to expand functionality
#---------------------------------------
class UtilClass:
def __init__(self, scriptname, commandnames):
self.ScriptName = scriptname
if isinstance(commandnames, basestring):
self.CommandNames = [commandnames.lower()]
else:
self.CommandNames = map(lambda x: x.lower(), commandnames)
self.Settings = dict()
self.Data = None
# Called when injected into Execute, Init etc
# Extracts Data object from parameter if it exists, such as in Execute
def SetData(self, Parent, args, kwargs):
self.Parent = Parent
for arg in args:
try:
if 'User' in dir(arg):
self.Data = arg
except Exception as e:
self.Log('[AnkhUtils] Unable to set data object. Error: {0}'.format(str(e)))
def ProcessCommand(self):
# No data, so it's not a command
if self.Data is None:
return
if not self.Data.IsChatMessage() or self.Data.GetParamCount() == 0:
return
match = None
command = self.Data.GetParam(0).lower()
for name in self.CommandNames:
if command == name:
match = command
break
if not match:
return
params = [self.Data.GetParam(i) for i in range(1, self.Data.GetParamCount())]
return CommandMatch(self.Data.User, match, self.CommandNames, params)
# Logging with string formatting. Also keeps you from having to add
# ScriptName parameter every time
# Usage: Utils.Log('{0} + {0} = {1}', 2, 4)
def Log(self, str, *args):
if len(args) > 0:
try:
self.Parent.Log(self.ScriptName, str.format(*args))
except Exception as e:
self.Parent.Log(self.ScriptName, '[AnkhUtils] Invalid format string or parameters for Utils.Log')
else:
self.Parent.Log(self.ScriptName, str)
# Allows you to set the settings object directly.
def SetSettings(self, data):
self.Settings = json.loads(data)
# Loads settings from a file. Pass __file__ from your script
# to load relative to your script. Optionally override the filename
def ReloadSettings(self, base, filename='settings.json'):
try:
with codecs.open(os.path.join(os.path.dirname(base), filename), encoding='utf-8-sig') as jsonData:
self.SetSettings(jsonData.read())
return self.Settings
except Exception as e:
self.Log('[AnkhUtils] Error loading {0}: {1}'.format(filename, str(e)))
return
# Helper to get pretty formatted cooldown text from Seconds remaining
def CooldownText(self, cd, seconds=True, minutes=True, hours=True):
h = int(math.floor(cd/3600))
m = int(math.floor((cd%3600)/60))
s = cd % 60
hourtext = '{0} hour{1}'.format(h, '' if h == 1 else 's') if hours and h > 0 else ''
minutetext = '{0} minute{1}'.format(m, '' if m == 1 else 's') if minutes and m > 0 else ''
secondtext = '{0} second{1}'.format(s, '' if s == 1 else 's') if seconds and s > 0 else ''
if hours and h > 0 and minutes and m > 0:
minutetext = ' '+minutetext
if seconds and s > 0 and ((minutes and m > 0) or (hours and h > 0)):
secondtext = ' '+secondtext
return '{0}{1}{2}'.format(hourtext, minutetext, secondtext)
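    # Hedged example (added): with the default keyword arguments,
    # CooldownText(3725) returns "1 hour 2 minutes 5 seconds".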
# Sends a Twitch or Discord chat message or a whisper/DM depending on where the
# initiating message came from
def ChatOrWhisper(self, msg, discord=True, whisper=True):
if self.Data is None:
self.Parent.SendTwitchMessage(msg)
return
whisper = whisper and self.Data.IsWhisper()
if self.Data.IsFromTwitch():
self.Parent.SendTwitchWhisper(self.Data.User, msg) if whisper else self.Parent.SendTwitchMessage(msg)
elif discord and self.Data.IsFromDiscord():
self.Parent.SendDiscordDM(self.Data.User, msg) if whisper else self.Parent.SendDiscordMessage(msg)
# Parsed commands object for use in ProcessCommand method
class CommandMatch:
def __init__(self, user, matched, commandnames, params):
self.CommandNames = commandnames
self.MatchedCommand = matched
self.Params = params
self.User = user
self.Target = self.Target = params[0] if len(params) > 0 else None
| mit | 618,498,824,165,726,100 | 35.598485 | 107 | 0.634651 | false |
hjweide/cifar-10-uncertainty | iter_funcs.py | 1 | 2875 | import lasagne
import theano
import theano.tensor as T
from lasagne import layers
from lasagne.regularization import regularize_network_params, l2
def create_iter_funcs_train(l_out, lr, mntm, wd):
X = T.tensor4('X')
y = T.ivector('y')
X_batch = T.tensor4('X_batch')
y_batch = T.ivector('y_batch')
y_hat = layers.get_output(l_out, X, deterministic=False)
# softmax loss
train_loss = T.mean(
T.nnet.categorical_crossentropy(y_hat, y))
# L2 regularization
train_loss += wd * regularize_network_params(l_out, l2)
train_acc = T.mean(
T.eq(y_hat.argmax(axis=1), y))
all_params = layers.get_all_params(l_out, trainable=True)
updates = lasagne.updates.nesterov_momentum(
train_loss, all_params, lr, mntm)
train_iter = theano.function(
inputs=[theano.Param(X_batch), theano.Param(y_batch)],
outputs=[train_loss, train_acc],
updates=updates,
givens={
X: X_batch,
y: y_batch,
},
)
return train_iter
def create_iter_funcs_valid(l_out, bs=None, N=50, mc_dropout=False):
X = T.tensor4('X')
y = T.ivector('y')
X_batch = T.tensor4('X_batch')
y_batch = T.ivector('y_batch')
if not mc_dropout:
y_hat = layers.get_output(l_out, X, deterministic=True)
else:
if bs is None:
raise ValueError('a fixed batch size is required for mc dropout')
X_repeat = T.extra_ops.repeat(X, N, axis=0)
y_sample = layers.get_output(
l_out, X_repeat, deterministic=False)
sizes = [X_repeat.shape[0] / X.shape[0]] * bs
y_sample_split = T.as_tensor_variable(
T.split(y_sample, sizes, bs, axis=0))
y_hat = T.mean(y_sample_split, axis=1)
valid_loss = T.mean(
T.nnet.categorical_crossentropy(y_hat, y))
valid_acc = T.mean(
T.eq(y_hat.argmax(axis=1), y))
valid_iter = theano.function(
inputs=[theano.Param(X_batch), theano.Param(y_batch)],
outputs=[valid_loss, valid_acc],
givens={
X: X_batch,
y: y_batch,
},
)
return valid_iter
def create_iter_funcs_test(l_out, bs, N=50):
X = T.tensor4('X')
X_batch = T.tensor4('X_batch')
X_repeat = T.extra_ops.repeat(X, N, axis=0)
y_sample = layers.get_output(
l_out, X_repeat, deterministic=False)
# the number of splits needs to be pre-defined
sizes = [X_repeat.shape[0] / X.shape[0]] * bs
y_sample_split = T.as_tensor_variable(
T.split(y_sample, sizes, bs, axis=0))
y_hat = T.mean(y_sample_split, axis=1)
#y_var = T.var(y_sample_split, axis=1)
test_iter = theano.function(
inputs=[theano.Param(X_batch)],
outputs=y_hat,
#outputs=[y_hat, y_var],
givens={
X: X_batch,
},
)
return test_iter
| mit | 9,025,105,275,118,694,000 | 26.644231 | 77 | 0.582609 | false |
EDUlib/edx-ora2 | openassessment/assessment/migrations/0023_assign_criteria_and_option_labels.py | 1 | 14308 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"""
Default the criterion and option labels to the same value as "name".
"""
for criterion in orm['assessment.criterion'].objects.filter(label=""):
criterion.label = criterion.name
criterion.save()
for option in orm['assessment.criterionoption'].objects.filter(label=""):
option.label = option.name
option.save()
def backwards(self, orm):
""" The backwards migration does nothing. """
pass
models = {
'assessment.aiclassifier': {
'Meta': {'object_name': 'AIClassifier'},
'classifier_data': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'classifier_set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'classifiers'", 'to': "orm['assessment.AIClassifierSet']"}),
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.Criterion']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'assessment.aiclassifierset': {
'Meta': {'ordering': "['-created_at', '-id']", 'object_name': 'AIClassifierSet'},
'algorithm_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.Rubric']"})
},
'assessment.aigradingworkflow': {
'Meta': {'object_name': 'AIGradingWorkflow'},
'algorithm_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'+'", 'null': 'True', 'to': "orm['assessment.Assessment']"}),
'classifier_set': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'+'", 'null': 'True', 'to': "orm['assessment.AIClassifierSet']"}),
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'essay_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.Rubric']"}),
'scheduled_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
'assessment.aitrainingworkflow': {
'Meta': {'object_name': 'AITrainingWorkflow'},
'algorithm_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'classifier_set': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'+'", 'null': 'True', 'to': "orm['assessment.AIClassifierSet']"}),
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'scheduled_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'training_examples': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'+'", 'symmetrical': 'False', 'to': "orm['assessment.TrainingExample']"}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
'assessment.assessment': {
'Meta': {'ordering': "['-scored_at', '-id']", 'object_name': 'Assessment'},
'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"}),
'score_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'scored_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'scorer_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'assessment.assessmentfeedback': {
'Meta': {'object_name': 'AssessmentFeedback'},
'assessments': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.Assessment']"}),
'feedback_text': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.AssessmentFeedbackOption']"}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.assessmentfeedbackoption': {
'Meta': {'object_name': 'AssessmentFeedbackOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'assessment.assessmentpart': {
'Meta': {'object_name': 'AssessmentPart'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['assessment.Assessment']"}),
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.Criterion']"}),
'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['assessment.CriterionOption']"})
},
'assessment.criterion': {
'Meta': {'ordering': "['rubric', 'order_num']", 'object_name': 'Criterion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'prompt': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'criteria'", 'to': "orm['assessment.Rubric']"})
},
'assessment.criterionoption': {
'Meta': {'ordering': "['criterion', 'order_num']", 'object_name': 'CriterionOption'},
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['assessment.Criterion']"}),
'explanation': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'points': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'assessment.peerworkflow': {
'Meta': {'ordering': "['created_at', 'id']", 'object_name': 'PeerWorkflow'},
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'grading_completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.peerworkflowitem': {
'Meta': {'ordering': "['started_at', 'id']", 'object_name': 'PeerWorkflowItem'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Assessment']", 'null': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded_by'", 'to': "orm['assessment.PeerWorkflow']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scored': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scorer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded'", 'to': "orm['assessment.PeerWorkflow']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'assessment.rubric': {
'Meta': {'object_name': 'Rubric'},
'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'structure_hash': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'})
},
'assessment.studenttrainingworkflow': {
'Meta': {'object_name': 'StudentTrainingWorkflow'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.studenttrainingworkflowitem': {
'Meta': {'ordering': "['workflow', 'order_num']", 'unique_together': "(('workflow', 'order_num'),)", 'object_name': 'StudentTrainingWorkflowItem'},
'completed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'training_example': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.TrainingExample']"}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['assessment.StudentTrainingWorkflow']"})
},
'assessment.trainingexample': {
'Meta': {'object_name': 'TrainingExample'},
'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'options_selected': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['assessment.CriterionOption']", 'symmetrical': 'False'}),
'raw_answer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"})
}
}
complete_apps = ['assessment']
symmetrical = True
| agpl-3.0 | 5,719,073,179,449,297,000 | 79.836158 | 217 | 0.563042 | false |
npapier/sbf | pak/mkdb/cityhash.py | 1 | 3317 | # SConsBuildFramework - Copyright (C) 2013, Nicolas Papier.
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation.
# Author Guillaume Brocker
#
# http://code.google.com/p/cityhash/
import os
import re
import shutil
import subprocess
# Version components definition
versionMajor = 1
versionMinor = 1
versionMaint = 0
# Package name and version definition
packageName = 'cityhash'
packageVersion = '{0}-{1}-{2}'.format(versionMajor, versionMinor, versionMaint)
# Defines the path to the source files
sourcePath = '{0}-{1}.{2}.{3}'.format(packageName, versionMajor, versionMinor, versionMaint )
# Defines the content of the SBF project file.
sconsDefaultOptions = """productName = '{name}'
type = 'static'
version = '{version}'""".format( name=packageName, version=packageVersion)
def patcher():
def _patcher():
global io
global os
global packageName
global re
global sconsDefaultOptions
global shutil
global sourcePath
# Creates a new directory tree for the compilation.
os.makedirs(packageName+'/include')
os.makedirs(packageName+'/src')
# Creates the SBF project options file
sconsDefaultoptionsFile = open( packageName+'/default.options', 'w' )
sconsDefaultoptionsFile.write( sconsDefaultOptions )
sconsDefaultoptionsFile.close()
# Copy the sconstruct file into the project
shutil.copy( os.getenv('SCONS_BUILD_FRAMEWORK')+'/template/projectTemplate/sconstruct', packageName )
# Moves include and source files to the right place.
shutil.move( sourcePath+'/src/city.h', packageName+'/include' )
###Deactivated###
#shutil.move( srcPath+'/citycrc.h', includePath )
shutil.move( sourcePath+'/src/city.cc', packageName+'/src/city.cpp' )
# Patches the 'city.h' file
with open( packageName+'/include/city.h', 'r+' ) as city_h:
city_h_lines = city_h.readlines()
for (i, line) in enumerate(city_h_lines):
city_h_lines[i] = re.sub('^(uint\d+)', 'extern "C" \\1', line)
city_h.seek(0)
city_h.writelines(city_h_lines)
# Patches the city.cpp file
with open( packageName+'/src/city.cpp', 'r+' ) as city_cpp:
city_cpp_lines = city_cpp.readlines()
for (i, line) in enumerate(city_cpp_lines):
if( re.match('^#include "config.h"', line) ):
city_cpp_lines[i] = '//' + line
city_cpp.seek(0)
city_cpp.writelines(city_cpp_lines)
return lambda : _patcher()
def builder():
def _builder():
global os
global packageName
global subprocess
owd = os.getcwd()
nwd = owd + '/' + packageName
os.chdir(nwd)
installPaths = 'installPaths={0}/{1}/local'.format(owd, packageName)
subprocess.call(['scons',installPaths,'release'], shell=True)
subprocess.call(['scons',installPaths,'debug'], shell=True)
os.chdir(owd)
return lambda : _builder()
descriptor = {
'name' : packageName,
'version' : packageVersion,
'urls' : [ 'http://cityhash.googlecode.com/files/{0}-{1}.{2}.{3}.tar.gz'.format(packageName, versionMajor, versionMinor, versionMaint) ],
'include' : [ packageName+'/local/include/*.h' ],
'license' : [ sourcePath+'/COPYING' ],
'lib' : [ packageName+'/local/bin/*.lib' ],
'builds' : [ patcher(), builder() ]
}
| gpl-3.0 | 2,374,909,519,164,992,500 | 29.292453 | 139 | 0.669279 | false |
desihub/qlf | backend/framework/qlf/dashboard/bokeh/qaskypeak/main.py | 1 | 6438 | from bokeh.layouts import row, column
from bokeh.models import HoverTool, ColumnDataSource, Span
from bokeh.models import LinearColorMapper
from bokeh.models import TapTool, OpenURL, Range1d
from bokeh.models.widgets import Div
from qlf_models import QLFModels
from dashboard.bokeh.helper import sort_obj
from dashboard.bokeh.plots.descriptors.table import Table
from dashboard.bokeh.plots.descriptors.title import Title
from dashboard.bokeh.plots.plot2d.main import Plot2d
from dashboard.bokeh.helper import get_palette
import numpy as np
from bokeh.resources import CDN
from bokeh.embed import file_html
class Skypeak:
def __init__(self, process_id, arm, spectrograph):
self.selected_process_id = process_id
self.selected_arm = arm
self.selected_spectrograph = spectrograph
def load_qa(self):
cam = self.selected_arm+str(self.selected_spectrograph)
mergedqa = QLFModels().get_output(self.selected_process_id, cam)
check_spectra = mergedqa['TASKS']['CHECK_SPECTRA']
gen_info = mergedqa['GENERAL_INFO']
ra = gen_info['RA']
dec = gen_info['DEC']
nrg = check_spectra['PARAMS']['PEAKCOUNT_NORMAL_RANGE']
wrg = check_spectra['PARAMS']['PEAKCOUNT_WARN_RANGE']
current_exposures = [check_spectra['METRICS']['PEAKCOUNT']]
program = gen_info['PROGRAM'].upper()
reference_exposures = check_spectra['PARAMS']['PEAKCOUNT_' +
program + '_REF']
obj_type = sort_obj(gen_info)
my_palette = get_palette("RdYlBu_r")
peak_tooltip = """
<div>
<div>
<span style="font-size: 1vw; font-weight: bold; color: #303030;">PEAKCOUNT: </span>
<span style="font-size: 1vw; color: #515151">@peakcount_fib</span>
</div>
<div>
<span style="font-size: 1vw; font-weight: bold; color: #303030;">RA: </span>
<span style="font-size: 1vw; color: #515151;">@x1</span>
</div>
<div>
<span style="font-size: 1vw; font-weight: bold; color: #303030;">DEC: </span>
<span style="font-size: 1vw; color: #515151;">@y1</span>
</div>
<div>
<span style="font-size: 1vw; font-weight: bold; color: #303030;">Obj Type: </span>
<span style="font-size: 1vw; color: #515151;">@OBJ_TYPE</span>
</div>
</div>
"""
url = "http://legacysurvey.org/viewer?ra=@ra&dec=@dec&zoom=16&layer=decals-dr5"
qlf_fiberid = np.arange(0, 500)
peak_hover = HoverTool(tooltips=peak_tooltip)
peakcount_fib = check_spectra['METRICS']['PEAKCOUNT_FIB']
source = ColumnDataSource(data={
'x1': ra,
'y1': dec,
'peakcount_fib': peakcount_fib,
'delta_peakcount_fib': np.array(peakcount_fib)-reference_exposures,
'QLF_FIBERID': qlf_fiberid,
'OBJ_TYPE': obj_type,
})
low, high = wrg
mapper = LinearColorMapper(palette=my_palette,
low=low, #0.98*np.min(peakcount_fib),
high=high, #1.02*np.max(peakcount_fib))
nan_color='darkgrey')
radius = 0.0165
radius_hover = 0.02
# centralize wedges in plots:
ra_center=0.5*(max(ra)+min(ra))
dec_center=0.5*(max(dec)+min(dec))
xrange_wedge = Range1d(start=ra_center + .95, end=ra_center-.95)
yrange_wedge = Range1d(start=dec_center+.82, end=dec_center-.82)
# axes limit
xmin, xmax = [min(gen_info['RA'][:]), max(gen_info['RA'][:])]
ymin, ymax = [min(gen_info['DEC'][:]), max(gen_info['DEC'][:])]
xfac, yfac = [(xmax-xmin)*0.06, (ymax-ymin)*0.06]
left, right = xmin - xfac, xmax+xfac
bottom, top = ymin-yfac, ymax+yfac
wedge_plot = Plot2d(
x_range=xrange_wedge,
y_range=yrange_wedge,
x_label="RA",
y_label="DEC",
tooltip=peak_tooltip,
title="PEAKCOUNT",
width=500,
height=380,
).wedge(
source,
x='x1',
y='y1',
field='delta_peakcount_fib',
mapper=mapper,
).plot
info_col = Title().write_description('skypeak')
# ================================
# histogram
hist_tooltip = """
<div>
<div>
<span style="font-size: 1vw; font-weight: bold; color: #303030;">Frequency: </span>
<span style="font-size: 1vw; color: #515151">@hist</span>
</div>
<div>
<span style="font-size: 1vw; font-weight: bold; color: #303030;">Peakcount: </span>
<span style="font-size: 1vw; color: #515151;">[@left, @right]</span>
</div>
</div>
"""
hist, edges = np.histogram(peakcount_fib, bins="sqrt")
source_hist = ColumnDataSource(data={
'hist': hist,
'histplusone': hist+1,
'bottom': [0] * len(hist),
'bottomplusone': [1]*len(hist),
'left': edges[:-1],
'right': edges[1:]
})
p_hist = Plot2d(
y_range=(1, 11**(int(np.log10(max(hist)))+1)),
x_label='PEAKCOUNT',
y_label='Frequency + 1',
tooltip=hist_tooltip,
title="",
width=550,
height=300,
yscale="log",
hover_mode="vline",
).quad(
source_hist,
top='histplusone',
bottom='bottomplusone',
line_width=1,
)
# Prepare tables
keynames = ["PEAKCOUNT" for i in range(len(current_exposures))]
table = Table().single_table(keynames, current_exposures, reference_exposures, nrg, wrg)
layout = column(info_col, Div(),
table, Div(),
column(wedge_plot, sizing_mode='scale_both'),
column(p_hist, sizing_mode='scale_both'),
css_classes=["display-grid"])
return file_html(layout, CDN, "SKYPEAK")
| bsd-3-clause | -3,194,120,361,751,438,300 | 35.168539 | 103 | 0.517552 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2015_06_15/models/bgp_settings.py | 1 | 1394 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class BgpSettings(Model):
"""BgpSettings.
:param asn: Gets or sets this BGP speaker's ASN
:type asn: long
:param bgp_peering_address: Gets or sets the BGP peering address and BGP
identifier of this BGP speaker
:type bgp_peering_address: str
:param peer_weight: Gets or sets the weight added to routes learned from
this BGP speaker
:type peer_weight: int
"""
_attribute_map = {
'asn': {'key': 'asn', 'type': 'long'},
'bgp_peering_address': {'key': 'bgpPeeringAddress', 'type': 'str'},
'peer_weight': {'key': 'peerWeight', 'type': 'int'},
}
def __init__(self, **kwargs):
super(BgpSettings, self).__init__(**kwargs)
self.asn = kwargs.get('asn', None)
self.bgp_peering_address = kwargs.get('bgp_peering_address', None)
self.peer_weight = kwargs.get('peer_weight', None)
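# Minimal usage sketch (values are illustrative assumptions, not defaults):
#
#   settings = BgpSettings(asn=65010,
#                          bgp_peering_address='10.0.0.1',
#                          peer_weight=0)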
| mit | -7,371,447,792,608,689,000 | 35.684211 | 76 | 0.586083 | false |
Spoken-tutorial/spoken-website | creation/migrations/0001_initial.py | 1 | 29727 | # -*- coding: utf-8 -*-
# Third Party Stuff
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='AdminReviewerNotification',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255)),
('message', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='AdminReviewLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.PositiveSmallIntegerField()),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='ArchivedVideo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('version', models.PositiveSmallIntegerField(default=0)),
('video', models.CharField(max_length=255)),
('atype', models.PositiveSmallIntegerField(default=0)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Collaborate',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('contact_number', models.CharField(max_length=20, null=True)),
('institution_name', models.CharField(max_length=255)),
('foss_name', models.CharField(max_length=255)),
('are_you_one', models.CharField(max_length=255)),
('howmuch_time', models.PositiveIntegerField()),
('availability_constraints', models.TextField(null=True, blank=True)),
('is_reviewer', models.BooleanField()),
('contribs_foss', models.TextField(null=True, blank=True)),
('educational_qualifications', models.TextField(null=True, blank=True)),
('prof_experience', models.CharField(max_length=255, null=True, blank=True)),
('lang_contributor', models.BooleanField()),
('lead_st', models.BooleanField()),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='ContributeTowards',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='ContributorLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('component', models.CharField(max_length=255)),
('status', models.PositiveSmallIntegerField()),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='ContributorNotification',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255)),
('message', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='ContributorRole',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.BooleanField()),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Contributor Role',
},
),
migrations.CreateModel(
name='DomainReviewerNotification',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255)),
('message', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='DomainReviewerRole',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.BooleanField()),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Domain Reviewer Role',
},
),
migrations.CreateModel(
name='DomainReviewLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('component', models.CharField(max_length=255)),
('status', models.PositiveSmallIntegerField()),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='FossAvailableForTest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.BooleanField(default=0)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='FossAvailableForWorkshop',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.BooleanField(default=0)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='FossCategory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('foss', models.CharField(unique=True, max_length=255)),
('description', models.TextField()),
('status', models.BooleanField(max_length=2)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('foss',),
'verbose_name': 'FOSS Categorie',
},
),
migrations.CreateModel(
name='Language',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=255)),
('code', models.CharField(default=b'en', max_length=10)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Level',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('level', models.CharField(max_length=255)),
('code', models.CharField(max_length=10)),
],
options={
'verbose_name': 'Tutorial Level',
},
),
migrations.CreateModel(
name='NeedImprovementLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('review_state', models.PositiveSmallIntegerField()),
('component', models.CharField(max_length=50)),
('comment', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='OperatingSystem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='PlaylistInfo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('playlist_id', models.CharField(max_length=255)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('foss', models.ForeignKey(to='creation.FossCategory')),
('language', models.ForeignKey(to='creation.Language')),
],
options={
'verbose_name': 'Playlist Info',
},
),
migrations.CreateModel(
name='PlaylistItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('item_id', models.CharField(max_length=255)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('playlist', models.ForeignKey(to='creation.PlaylistInfo')),
],
options={
'verbose_name': 'Playlist Item',
},
),
migrations.CreateModel(
name='PublicReviewLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='PublishTutorialLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='QualityReviewerNotification',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255)),
('message', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='QualityReviewerRole',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.BooleanField()),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('foss_category', models.ForeignKey(to='creation.FossCategory')),
('language', models.ForeignKey(to='creation.Language')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Quality Reviewer Role',
},
),
migrations.CreateModel(
name='QualityReviewLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('component', models.CharField(max_length=255)),
('status', models.PositiveSmallIntegerField()),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='RoleRequest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('role_type', models.IntegerField(default=0)),
('status', models.PositiveSmallIntegerField(default=0)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('approved_user', models.ForeignKey(related_name='approved_user', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
('user', models.ForeignKey(related_name='user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='SuggestExample',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('topic_title', models.CharField(max_length=255)),
('example_description', models.TextField()),
('script_writer', models.BooleanField()),
('is_reviewer', models.BooleanField()),
('created', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='SuggestTopic',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('topic_title', models.CharField(max_length=255)),
('brief_description', models.TextField()),
('example_suggestion', models.BooleanField()),
('created', models.DateTimeField(auto_now_add=True)),
('difficulty_level', models.ForeignKey(to='creation.Level')),
('operating_system', models.ManyToManyField(to='creation.OperatingSystem')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='TutorialCommonContent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('slide', models.CharField(max_length=255)),
('slide_status', models.PositiveSmallIntegerField(default=0)),
('code', models.CharField(max_length=255)),
('code_status', models.PositiveSmallIntegerField(default=0)),
('assignment', models.CharField(max_length=255)),
('assignment_status', models.PositiveSmallIntegerField(default=0)),
('prerequisite_status', models.PositiveSmallIntegerField(default=0)),
('keyword', models.TextField()),
('keyword_status', models.PositiveSmallIntegerField(default=0)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('assignment_user', models.ForeignKey(related_name='assignments', to=settings.AUTH_USER_MODEL)),
('code_user', models.ForeignKey(related_name='codes', to=settings.AUTH_USER_MODEL)),
('keyword_user', models.ForeignKey(related_name='keywords', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Tutorial Common Content',
},
),
migrations.CreateModel(
name='TutorialDetail',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('tutorial', models.CharField(max_length=255)),
('order', models.IntegerField()),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('foss', models.ForeignKey(to='creation.FossCategory')),
('level', models.ForeignKey(to='creation.Level')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Tutorial Detail',
},
),
migrations.CreateModel(
name='TutorialMissingComponent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('component', models.PositiveSmallIntegerField()),
('report_type', models.BooleanField(default=0)),
('remarks', models.TextField(null=True, blank=True)),
('inform_me', models.BooleanField(default=0)),
('email', models.CharField(max_length=255, null=True, blank=True)),
('reply_status', models.BooleanField(default=0)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='TutorialMissingComponentReply',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('reply_message', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
('missing_component', models.ForeignKey(to='creation.TutorialMissingComponent')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='TutorialResource',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('outline', models.TextField()),
('outline_status', models.PositiveSmallIntegerField(default=0)),
('script', models.URLField(max_length=255)),
('script_status', models.PositiveSmallIntegerField(default=0)),
('timed_script', models.URLField(max_length=255)),
('video', models.CharField(max_length=255)),
('video_id', models.CharField(default=None, max_length=255, null=True, blank=True)),
('playlist_item_id', models.CharField(default=None, max_length=255, null=True, blank=True)),
('video_thumbnail_time', models.TimeField(default='00:00:00')),
('video_status', models.PositiveSmallIntegerField(default=0)),
('status', models.PositiveSmallIntegerField(default=0)),
('version', models.PositiveSmallIntegerField(default=0)),
('hit_count', models.PositiveIntegerField(default=0)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('common_content', models.ForeignKey(to='creation.TutorialCommonContent')),
('language', models.ForeignKey(to='creation.Language')),
('outline_user', models.ForeignKey(related_name='outlines', to=settings.AUTH_USER_MODEL)),
('script_user', models.ForeignKey(related_name='scripts', to=settings.AUTH_USER_MODEL)),
('tutorial_detail', models.ForeignKey(to='creation.TutorialDetail')),
('video_user', models.ForeignKey(related_name='videos', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='tutorialmissingcomponent',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='tutorialmissingcomponent',
name='user',
field=models.ForeignKey(related_name='raised_user', blank=True, to=settings.AUTH_USER_MODEL, null=True),
),
migrations.AddField(
model_name='tutorialcommoncontent',
name='prerequisite',
field=models.ForeignKey(related_name='prerequisite', blank=True, to='creation.TutorialDetail', null=True),
),
migrations.AddField(
model_name='tutorialcommoncontent',
name='prerequisite_user',
field=models.ForeignKey(related_name='prerequisite', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='tutorialcommoncontent',
name='slide_user',
field=models.ForeignKey(related_name='slides', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='tutorialcommoncontent',
name='tutorial_detail',
field=models.OneToOneField(related_name='tutorial_detail', to='creation.TutorialDetail'),
),
migrations.AddField(
model_name='qualityreviewlog',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='qualityreviewlog',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='qualityreviewernotification',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='qualityreviewernotification',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='publishtutoriallog',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='publishtutoriallog',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='publicreviewlog',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='publicreviewlog',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='needimprovementlog',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='needimprovementlog',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='fossavailableforworkshop',
name='foss',
field=models.ForeignKey(to='creation.FossCategory'),
),
migrations.AddField(
model_name='fossavailableforworkshop',
name='language',
field=models.ForeignKey(to='creation.Language'),
),
migrations.AddField(
model_name='fossavailablefortest',
name='foss',
field=models.ForeignKey(to='creation.FossCategory'),
),
migrations.AddField(
model_name='fossavailablefortest',
name='language',
field=models.ForeignKey(to='creation.Language'),
),
migrations.AddField(
model_name='domainreviewlog',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='domainreviewlog',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='domainreviewerrole',
name='foss_category',
field=models.ForeignKey(to='creation.FossCategory'),
),
migrations.AddField(
model_name='domainreviewerrole',
name='language',
field=models.ForeignKey(to='creation.Language'),
),
migrations.AddField(
model_name='domainreviewerrole',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='domainreviewernotification',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='domainreviewernotification',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='contributorrole',
name='foss_category',
field=models.ForeignKey(to='creation.FossCategory'),
),
migrations.AddField(
model_name='contributorrole',
name='language',
field=models.ForeignKey(to='creation.Language'),
),
migrations.AddField(
model_name='contributorrole',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='contributornotification',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='contributornotification',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='contributorlog',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='contributorlog',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='collaborate',
name='contribute_towards',
field=models.ManyToManyField(to='creation.ContributeTowards'),
),
migrations.AddField(
model_name='collaborate',
name='language',
field=models.ForeignKey(to='creation.Language'),
),
migrations.AddField(
model_name='collaborate',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='archivedvideo',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='archivedvideo',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='adminreviewlog',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='adminreviewlog',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='adminreviewernotification',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='adminreviewernotification',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='tutorialresource',
unique_together=set([('tutorial_detail', 'language')]),
),
migrations.AlterUniqueTogether(
name='tutorialdetail',
unique_together=set([('foss', 'tutorial', 'level')]),
),
migrations.AlterUniqueTogether(
name='rolerequest',
unique_together=set([('user', 'role_type')]),
),
migrations.AlterUniqueTogether(
name='qualityreviewerrole',
unique_together=set([('user', 'foss_category', 'language')]),
),
migrations.AlterUniqueTogether(
name='playlistitem',
unique_together=set([('playlist', 'item_id')]),
),
migrations.AlterUniqueTogether(
name='playlistinfo',
unique_together=set([('foss', 'language')]),
),
migrations.AlterUniqueTogether(
name='fossavailableforworkshop',
unique_together=set([('foss', 'language')]),
),
migrations.AlterUniqueTogether(
name='fossavailablefortest',
unique_together=set([('foss', 'language')]),
),
migrations.AlterUniqueTogether(
name='domainreviewerrole',
unique_together=set([('user', 'foss_category', 'language')]),
),
migrations.AlterUniqueTogether(
name='contributorrole',
unique_together=set([('user', 'foss_category', 'language')]),
),
]
| gpl-3.0 | 8,895,247,861,478,283,000 | 44.040909 | 135 | 0.552192 | false |
sh01/taf | setup.py | 1 | 1102 | #!/usr/bin/env python3
#Copyright 2015 Sebastian Hagen
# This file is part of taf.
# taf is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2
# as published by the Free Software Foundation
#
# taf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from distutils.core import setup
if (sys.version_info[0] <= 2):
raise Exception('This program needs a python >= 3.0')
setup(name='taf',
version='0.1',
description='TAF: Tunneled attention flags.',
author='Sebastian Hagen',
author_email='[email protected]',
#url='http://git.memespace.net/git/??',
packages=('taf',),
scripts=(
'src/bin/logs2stdout.py',
'src/bin/taf_ui.py'
),
package_dir={'taf':'src/taf'}
)
| gpl-2.0 | -1,341,954,545,255,813,000 | 29.611111 | 71 | 0.715064 | false |
terhorstd/nest-simulator | pynest/examples/one_neuron_with_noise.py | 1 | 3494 | # -*- coding: utf-8 -*-
#
# one_neuron_with_noise.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
One neuron with noise
----------------------
This script simulates a neuron receiving input from two Poisson generators,
and records the neuron's membrane potential.
KEYWORDS: iaf_psc_alpha, Poisson generator, voltmeter
"""
###############################################################################
# First, we import all necessary modules needed to simulate, analyze and
# plot our example. Additionally, we set the verbosity to only show warnings
# and reset the kernel.
# Resetting the kernel removes any nodes we may have created previously and
# resets the internal clock to zero. This allows us to execute the script
# several times in a Python shell without interference from previous NEST
# simulations.
import nest
import nest.voltage_trace
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
###############################################################################
# Second, the nodes (the neuron, the two Poisson generators, and the
# voltmeter) are created using the `Create()` function.
# We store the returned handles in variables for later reference.
neuron = nest.Create("iaf_psc_alpha")
noise = nest.Create("poisson_generator", 2)
voltmeter = nest.Create("voltmeter")
###############################################################################
# Third, the voltmeter and the Poisson generators are configured using
# `SetStatus()`, which expects a list of node handles and a list of parameter
# dictionaries. Note that we do not need to set parameters for the neuron,
# since it has satisfactory defaults.
# We set the two Poisson generators to 80 000 Hz and 15 000 Hz, respectively.
# For the voltmeter, we want to record the global id of the observed nodes and
# set the `withgid` flag of the voltmeter to ``True``.
# We also set its property `withtime` so it will also record the points
# in time at which it samples the membrane voltage.
nest.SetStatus(noise, [{"rate": 80000.0}, {"rate": 15000.0}])
nest.SetStatus(voltmeter, {"withgid": True, "withtime": True})
###############################################################################
# Fourth, the neuron is connected to the Poisson generator and to the
# voltmeter. We also specify the synaptic weight and delay in this step.
nest.Connect(noise, neuron, syn_spec={'weight': [[1.2, -1.0]], 'delay': 1.0})
nest.Connect(voltmeter, neuron)
###############################################################################
# Now we simulate the network using `Simulate()`, which takes the
# desired simulation time in milliseconds.
nest.Simulate(1000.0)
###############################################################################
# Finally, we plot the neuron's membrane potential as a function of
# time.
nest.voltage_trace.from_device(voltmeter)
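###############################################################################
# Depending on the matplotlib backend in use, an explicit ``show()`` call may
# be needed to actually open the figure window; matplotlib itself is already
# pulled in by `nest.voltage_trace`.
import matplotlib.pyplot as plt
plt.show()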
| gpl-2.0 | -7,239,480,009,474,898,000 | 39.16092 | 79 | 0.643389 | false |
jeremiedecock/snippets | python/pyqt/pyqt5/widget_QTableView_delegate_on_edit_using_dateedit_widget.py | 1 | 4596 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Ref:
# - http://doc.qt.io/qt-5/modelview.html#3-4-delegates
# - http://doc.qt.io/qt-5/model-view-programming.html#delegate-classes
# - http://doc.qt.io/qt-5/qabstractitemdelegate.html#details
# - http://doc.qt.io/qt-5/qitemdelegate.html#details
# - http://doc.qt.io/qt-5/qstyleditemdelegate.html#details
# - http://doc.qt.io/qt-5/qtwidgets-itemviews-spinboxdelegate-example.html
import sys
import datetime
from PyQt5.QtCore import Qt, QAbstractTableModel, QVariant
from PyQt5.QtWidgets import QApplication, QTableView, QStyledItemDelegate, QDateEdit
DATETIME_FORMAT = '%Y-%m-%d'
class MyData:
def __init__(self):
self._num_rows = 3
self._num_columns = 2
self._data = [[datetime.datetime.now().strftime(DATETIME_FORMAT) for j in range(self._num_columns)] for i in range(self._num_rows)]
def get_num_rows(self):
return self._num_rows
def get_num_columns(self):
return self._num_columns
def get_data(self, row_index, column_index):
value = self._data[row_index][column_index]
print("read ({},{}): {}".format(row_index, column_index, value))
return value
def set_data(self, row_index, column_index, value):
print("write ({},{}): {}".format(row_index, column_index, value))
self._data[row_index][column_index] = value
###############################################################################
class MyModel(QAbstractTableModel):
def __init__(self, data, parent=None):
super().__init__(parent)
        self._data = data # DON'T CALL THIS ATTRIBUTE "data", A METHOD ALREADY HAS THIS NAME (model.data(index, role)) !!!
def rowCount(self, parent):
return self._data.get_num_rows()
def columnCount(self, parent):
return self._data.get_num_columns()
def data(self, index, role):
if role == Qt.DisplayRole or role == Qt.EditRole:
# See https://stackoverflow.com/a/8480223
return self._data.get_data(index.row(), index.column())
return QVariant()
def setData(self, index, value, role):
if role == Qt.EditRole:
try:
self._data.set_data(index.row(), index.column(), value)
                # The following line is necessary, e.g., to dynamically update a QSortFilterProxyModel (see the sketch after this class)
self.dataChanged.emit(index, index, [Qt.EditRole])
except Exception as e:
print(e)
return False
return True
def flags(self, index):
return Qt.ItemIsSelectable | Qt.ItemIsEditable | Qt.ItemIsEnabled
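# As noted in setData() above, emitting dataChanged makes it possible to place
# a QSortFilterProxyModel (from PyQt5.QtCore) between this model and the view.
# A minimal sketch, assuming plain lexicographic sorting of the date strings is
# acceptable:
#
#   proxy = QSortFilterProxyModel()
#   proxy.setSourceModel(my_model)
#   table_view.setModel(proxy)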
###############################################################################
class MyDelegate(QStyledItemDelegate):
def createEditor(self, parent, option, index):
editor = QDateEdit(parent=parent)
editor.setMinimumDate(datetime.datetime(year=2017, month=9, day=1))
editor.setMaximumDate(datetime.datetime(year=2020, month=9, day=1))
editor.setDisplayFormat("yyyy-MM-dd")
editor.setCalendarPopup(True)
        # setFrame(): tells whether the editor draws itself with a frame.
        # If enabled (the default) the editor draws itself inside a frame, otherwise it draws itself without any frame.
editor.setFrame(False)
return editor
def setEditorData(self, editor, index):
str_value = index.data(Qt.EditRole) # equivalent of value = index.model().data(index, Qt.EditRole)
value = datetime.datetime.strptime(str_value, DATETIME_FORMAT)
        editor.setDate(value.date()) # value cannot be a string, it has to be a datetime...
def setModelData(self, editor, model, index):
editor.interpretText()
value = editor.text()
model.setData(index, value, Qt.EditRole)
def updateEditorGeometry(self, editor, option, index):
editor.setGeometry(option.rect)
if __name__ == '__main__':
app = QApplication(sys.argv)
data = MyData()
table_view = QTableView()
my_model = MyModel(data)
table_view.setModel(my_model)
delegate = MyDelegate()
table_view.setItemDelegate(delegate)
table_view.show()
# The mainloop of the application. The event handling starts from this point.
    # The exec_() method has a trailing underscore because exec is a Python keyword.
exit_code = app.exec_()
# The sys.exit() method ensures a clean exit.
    # The environment will be informed of how the application ended.
sys.exit(exit_code)
| mit | 1,544,970,414,205,038,800 | 34.353846 | 139 | 0.625762 | false |
google-research/google-research | single_view_mpi/libs/geometry.py | 1 | 23328 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""Geometry utilities.
In these functions:
* Shapes are known statically. Exception: functions dealing with
points lists, whose length is data-dependent.
* Where possible, utility functions operate on the last one or two
dimensions of their inputs, and will function irrespective of how many
preceding dimensions are present. Where it makes sense, functions support
broadcasting on the part of the shape preceding the fixed dimensions.
This is to allow preceding dimensions to freely be used for batching or
other purposes.
* Camera poses are representated as 3x4 matrices (consisting of a 3x3 rotation
matrix and a 3-coordinate translation vector):
[[ r r r tx ]
[ r r r ty ]
[ r r r tz ]]
The matrix maps a position in world-space into a position relative to the
camera position. (Conventionally, the camera position has the Z axis pointing
into the screen and the Y axis pointing down.) Functions to manipulate such
matrices have names beginning "mat34_".
* Camera intrinsics are represented as a tensor of last dimension 4. The four
elements are fx, fy (focal length) and cx, cy (principal point). Intrinsics
are independent of image-size, they are expressed as if the image runs from
(0,0) to (1,1). So typically cx == cy == 0.5, and for a 90-degree field of
view, fx == 0.5.
* Points (whether 2D or 3D) are represented using the last axis of a tensor.
A set of N 3D points would have shape [N, 3].
* Planes in 3D are represented as 4-vectors. A point x is on the plane p exactly
when p.x == 0.
* We use texture coordinates to represent points in an image. They go from (0,0)
in the top-left corner of an image to (1,1) in the bottom right. It is
convenient to work with these coordinates rather than counts of pixels,
because they are resolution-independent.
This file is organised in the following sections:
MATRICES, PLANES, POINTS
– basic 3D geometry operations.
CAMERAS
– intrinsics, projection, camera-relative points.
IMAGES AND SAMPLING
– bilinear-sampling from images.
WARPS AND HOMOGRAPHIES
– plane sweep, homography, flow warping, depth warping.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
from tensorflow_addons import image as tfa_image
from single_view_mpi.libs import utils
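# As a small illustration of the conventions above (the values follow directly
# from the docstring and are not used elsewhere in this module): a camera at
# the world origin with identity rotation and a 90-degree horizontal field of
# view would be described by
#
#   pose = [[1., 0., 0., 0.],
#           [0., 1., 0., 0.],
#           [0., 0., 1., 0.]]           # 3x4 rotation | translation
#   intrinsics = [0.5, 0.5, 0.5, 0.5]   # fx, fy, cx, cy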
# ========== MATRICES, PLANES, POINTS ==========
def check_input_shape(name, tensor, axis, value):
"""Utility function for checking tensor shapes."""
shape = tensor.shape.as_list()
if shape[axis] != value:
raise ValueError('Input "%s": dimension %d should be %s. Shape = %s' %
(name, axis, value, shape))
def check_input_m34(name, tensor):
check_input_shape(name, tensor, -1, 4)
check_input_shape(name, tensor, -2, 3)
@utils.name_scope
def broadcasting_matmul(a, b, **kwargs):
(a, b) = utils.broadcast_to_match(a, b, ignore_axes=2)
return tf.matmul(a, b, **kwargs)
def mat34_to_mat44(matrix):
"""Converts 3x4 matrices to 4x4 matrices by adding filler.
Considering the last two dimensions of the input tensor, where m
indicates a matrix coefficient and t a matrix coefficient for translation,
this function does the following:
[[m, m, m, t], [[m, m, m, t],
[m, m, m, t], ===> [m, m, m, t],
[m, m, m, t]] [m, m, m, t],
[0, 0, 0, 1]]
Args:
matrix: [..., 3, 4] matrix
Returns:
A [..., 4, 4] tensor with an extra row [0, 0, 0, 1] added to each matrix.
Dimensions other than that last two are the same as for the input.
Raises:
ValueError: if input has incompatible shape.
"""
shape = matrix.shape.as_list()
check_input_m34('matrix', matrix)
extra_dims = shape[:-2]
filler = tf.constant([0.0, 0.0, 0.0, 1.0],
shape=len(extra_dims) * [1] + [1, 4])
filler = tf.tile(filler, extra_dims + [1, 1])
return tf.concat([matrix, filler], axis=-2)
def mat33_to_mat44(matrix):
"""Converts 3x3 matrices to 4x4 by adding zero translation and filler.
Considering the last two dimensions of the input tensor, where m indicates
a matrix entry, this function does the following:
[[m, m, m], [[m, m, m, 0],
[m, m, m], ===> [m, m, m, 0],
[m, m, m]] [m, m, m, 0],
[0, 0, 0, 1]]
Args:
matrix: A [..., 3, 3] tensor.
Returns:
A [..., 4, 4] matrix tensor. Dimensions other than the last two are
the same as for the input matrix.
Raises:
ValueError: if input has incompatible shape.
"""
shape = matrix.shape.as_list()
check_input_shape('matrix', matrix, -1, 3)
check_input_shape('matrix', matrix, -2, 3)
extra_dims = shape[:-2]
zeros = tf.zeros(extra_dims + [3, 1], dtype=matrix.dtype)
return mat34_to_mat44(tf.concat([matrix, zeros], axis=-1))
@utils.name_scope
def mat34_product(a, b):
"""Returns the product of a and b, 3x4 matrices.
Args:
a: [..., 3, 4] matrix
b: [..., 3, 4] matrix
Returns:
The product ab. The product is computed as if we added an extra row
[0, 0, 0, 1] to each matrix, multiplied them, and then removed the extra
row. The shapes of a and b must match, either directly or via
broadcasting.
Raises:
ValueError: if a or b are not 3x4 matrices.
"""
check_input_m34('a', a)
check_input_m34('b', b)
(a, b) = utils.broadcast_to_match(a, b, ignore_axes=2)
# Split translation part off from the rest
a33, a_translate = tf.split(a, [3, 1], axis=-1)
b33, b_translate = tf.split(b, [3, 1], axis=-1)
# Compute parts of the product
ab33 = tf.matmul(a33, b33)
ab_translate = a_translate + tf.matmul(a33, b_translate)
# Assemble
return tf.concat([ab33, ab_translate], axis=-1)
@utils.name_scope
def mat34_transform(m, v):
"""Transform a set of 3d points by a 3x4 pose matrix.
Args:
m: [..., 3, 4] matrix
v: [..., N, 3] set of N 3d points.
Returns:
The transformed points mv. The transform is computed as if we added an
extra coefficient with value 1.0 to each point, performed a matrix
multiplication, and removed the extra coefficient again. The parts of the
shape indicated by "..." must match, either directly or via broadcasting.
Raises:
ValueError: if inputs are the wrong shape.
"""
check_input_m34('m', m)
check_input_shape('v', v, -1, 3)
(m, v) = utils.broadcast_to_match(m, v, ignore_axes=2)
rotation = m[Ellipsis, :3]
# See b/116203395 for why I didn't do the next two lines together as
# translation = m[..., tf.newaxis, :, 3].
translation = m[Ellipsis, 3]
translation = translation[Ellipsis, tf.newaxis, :] # Now shape is [..., 1, 3].
# Points are stored as (N * 3) rather than (3 * N), so multiply in reverse
# rather than transposing them.
return tf.matmul(v, rotation, transpose_b=True) + translation
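# Minimal usage sketch for mat34_transform (values are illustrative):
#
#   pose = tf.eye(3, 4)                    # identity rotation, zero translation
#   points = tf.constant([[0., 0., 1.]])   # one 3d point, shape [1, 3]
#   mat34_transform(pose, points)          # -> [[0., 0., 1.]]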
@utils.name_scope
def mat34_transform_planes(m, p):
"""Transform a set of 3d planes by a 3x4 pose matrix.
Args:
m: [..., 3, 4] matrix, from source space to target space
p: [..., N, 4] set of N planes in source space.
Returns:
The transformed planes p' in target space.
If point x is on the plane p, then point Mx is on the plane p'. The parts of
the shape indicated by "..." must match either directly or via broadcasting.
Raises:
ValueError: if inputs are the wrong shape.
"""
check_input_m34('m', m)
check_input_shape('p', p, -1, 4)
(m, p) = utils.broadcast_to_match(m, p, ignore_axes=2)
# If x is on the plane p, then p . x = 0. We want to find p' such that
# p' . (M x) = 0. Writing T for transpose and i for inverse, this gives us
# p'T M x = 0, so p'T = pT Mi.
# Planes are stored as (N * 4) rather than (4 * N), i.e. pT rather than p, so
# we can use this directly to compute p'T:
return tf.matmul(p, mat34_to_mat44(mat34_pose_inverse(m)))
@utils.name_scope
def mat34_pose_inverse(matrix):
"""Invert a 3x4 matrix.
Args:
matrix: [..., 3, 4] matrix where [..., 3, 3] is a rotation matrix
Returns:
The inverse matrix, of the same shape as the input. It is computed as
if we added an extra row with values [0, 0, 0, 1], inverted the
matrix, and removed the row again.
Raises:
ValueError: if input is not a 3x4 matrix.
"""
check_input_m34('matrix', matrix)
rest, translation = tf.split(matrix, [3, 1], axis=-1)
inverse = tf.linalg.matrix_transpose(rest)
inverse_translation = -tf.matmul(inverse, translation)
return tf.concat([inverse, inverse_translation], axis=-1)
@utils.name_scope
def build_matrix(elements):
"""Stacks elements along two axes to make a tensor of matrices.
Args:
elements: [n, m] matrix of tensors, each with shape [...].
Returns:
[..., n, m] tensor of matrices, resulting from concatenating
the individual tensors.
"""
rows = [tf.stack(row_elements, axis=-1) for row_elements in elements]
return tf.stack(rows, axis=-2)
@utils.name_scope
def pose_from_6dof(vec):
"""Converts vector containing 6DoF pose parameters to pose matrices.
Args:
vec: [..., 6] parameters in the order tx, ty, tz, rx, ry, rz. rx, ry and rz
are Euler angles in radians. Rotation is first by z, then by y, then by x,
and translation happens last. Each rotation is counterclockwise about its
axis.
Returns:
rigid world-to-camera transformation matrix [..., 3, 4] corresponding
to the input. Rotation angles are clamped to +/- π before conversion.
"""
check_input_shape('vec', vec, -1, 6)
shape = vec.shape.as_list()
extra_dims = shape[:-1]
# Get translation as [..., 3] and rx, ry, rz each as [..., 1].
translation, rx, ry, rz = tf.split(vec, [3, 1, 1, 1], -1)
rx = tf.squeeze(tf.clip_by_value(rx, -math.pi, math.pi), axis=-1)
ry = tf.squeeze(tf.clip_by_value(ry, -math.pi, math.pi), axis=-1)
rz = tf.squeeze(tf.clip_by_value(rz, -math.pi, math.pi), axis=-1)
cos_x = tf.cos(rx)
sin_x = tf.sin(rx)
cos_y = tf.cos(ry)
sin_y = tf.sin(ry)
cos_z = tf.cos(rz)
sin_z = tf.sin(rz)
zero = tf.zeros(extra_dims)
one = tf.ones(extra_dims)
rotate_z = build_matrix([[cos_z, -sin_z, zero], [sin_z, cos_z, zero],
[zero, zero, one]])
rotate_y = build_matrix([[cos_y, zero, sin_y], [zero, one, zero],
[-sin_y, zero, cos_y]])
rotate_x = build_matrix([[one, zero, zero], [zero, cos_x, -sin_x],
[zero, sin_x, cos_x]])
rotation = tf.matmul(tf.matmul(rotate_x, rotate_y), rotate_z)
pose = tf.concat([rotation, translation[Ellipsis, tf.newaxis]], axis=-1)
return pose
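# For example (a sketch of the parameterisation above), an all-zero 6DoF vector
# yields the identity pose:
#
#   vec = tf.zeros([1, 6])    # tx, ty, tz, rx, ry, rz
#   pose_from_6dof(vec)       # -> [1, 3, 4], identity rotation, zero translation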
# ========== CAMERAS ==========
@utils.name_scope
def intrinsics_matrix(intrinsics):
"""Make a matrix mapping camera space to homogeneous texture coords.
Args:
intrinsics: [..., 4] intrinsics. Last dimension (fx, fy, cx, cy)
Returns:
[..., 3, 3] matrix mapping camera space to image space.
"""
fx = intrinsics[Ellipsis, 0]
fy = intrinsics[Ellipsis, 1]
cx = intrinsics[Ellipsis, 2]
cy = intrinsics[Ellipsis, 3]
zero = tf.zeros_like(fx)
one = tf.ones_like(fx)
return build_matrix(
[[fx, zero, cx], [zero, fy, cy], [zero, zero, one]])
@utils.name_scope
def inverse_intrinsics_matrix(intrinsics):
"""Return the inverse of the intrinsics matrix..
Args:
intrinsics: [..., 4] intrinsics. Last dimension (fx, fy, cx, cy)
Returns:
[..., 3, 3] matrix mapping homogeneous texture coords to camera space.
"""
fxi = 1.0 / intrinsics[Ellipsis, 0]
fyi = 1.0 / intrinsics[Ellipsis, 1]
cx = intrinsics[Ellipsis, 2]
cy = intrinsics[Ellipsis, 3]
zero = tf.zeros_like(cx)
one = tf.ones_like(cx)
return build_matrix(
[[fxi, zero, -cx * fxi], [zero, fyi, -cy * fyi], [zero, zero, one]])
@utils.name_scope
def homogenize(coords):
"""Convert (x, y) to (x, y, 1), or (x, y, z) to (x, y, z, 1)."""
ones = tf.ones_like(coords[Ellipsis, :1])
return tf.concat([coords, ones], axis=-1)
@utils.name_scope
def dehomogenize(coords):
"""Convert (x, y, w) to (x/w, y/w) or (x, y, z, w) to (x/w, y/w, z/w)."""
return tf.math.divide_no_nan(coords[Ellipsis, :-1], coords[Ellipsis, -1:])
@utils.name_scope
def texture_to_camera_coordinates(coords, intrinsics):
"""Convert texture coordinates to x,y,1 coordinates relative to camera.
Args:
coords: [..., 2] texture coordinates
intrinsics: [..., 4] (resolution-independent) camera intrinsics. Last
dimension (fx, fy, cx, cy).
Returns:
[..., 3] coordinates, transformed by scaling down by image size and
applying the inverse of the intrinsics. z-coordinates are all 1.
Raises:
ValueError: if coords is the wrong shape.
"""
check_input_shape('coords', coords, -1, 2)
# Shift to optical center and divide by focal length.
# (These are element-wise operations on the x and y coords.)
focal_length, optical_center = tf.split(intrinsics, [2, 2], axis=-1)
xy_coords = (coords - optical_center) / focal_length
return homogenize(xy_coords)
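# For instance, with the resolution-independent intrinsics described at the top
# of this file, the principal point maps onto the optical axis (an illustrative
# check, not used elsewhere):
#
#   intrinsics = tf.constant([0.5, 0.5, 0.5, 0.5])   # fx, fy, cx, cy
#   texture_to_camera_coordinates(tf.constant([[0.5, 0.5]]), intrinsics)
#   # -> [[0., 0., 1.]]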
@utils.name_scope
def camera_to_texture_coordinates(coords, intrinsics):
"""Convert (x,y,z) coordinates relative to camera to texture coordinates.
Args:
coords: [..., 3] coordinates
intrinsics: [..., 4] camera intrinsics. Last dimension (fx, fy, cx, cy)
Returns:
[..., 2] coordinates, transformed by dividing by Z, applying camera
intrinsics and scaling to image size.
Raises:
ValueError: if coords is the wrong shape.
"""
check_input_shape('coords', coords, -1, 3)
xy_coords = tf.math.divide_no_nan(coords[Ellipsis, :2], coords[Ellipsis, 2:])
# Scale by focal length and shift optical center.
# (These are element-wise operations on the x and y coords.)
focal_length, optical_center = tf.split(intrinsics, [2, 2], axis=-1)
xy_coords = (xy_coords * focal_length) + optical_center
return xy_coords
@utils.name_scope
def get_camera_relative_points(indices, point, pose):
"""Get tensor of camera-relative 3d points in a frame.
Args:
indices: [B, P] Indices into point of coordinates to retrieve.
point: [B, N, 3] A set of N (x,y,z) coordinates per batch item
pose: [B, 3, 4] Camera pose
Returns:
[B, P, 3] Point coordinates corresponding to the indices.
Specifically result[b, p, :] = point[b, indices[b, p], :].
"""
# There is no "batched gather" so we either must loop over the batch, or
# use gather_nd. Looping over the batch is simpler so we'll do that.
point_shape = point.shape.as_list()
# Batch size must be statically known
assert (point_shape is not None and len(point_shape) and
point_shape[0] is not None)
batch_size = point_shape[0]
coordinates = []
for item in range(batch_size):
coordinates.append(tf.gather(point[item], indices[item]))
extracted_points = tf.stack(coordinates)
# Convert points to be camera-relative.
return mat34_transform(pose, extracted_points)
# ========== IMAGES AND SAMPLING ==========
@utils.name_scope
def pixel_center_grid(height, width):
"""Produce a grid of (x,y) texture-coordinate pairs of pixel centers.
Args:
height: (integer) height, not a tensor
width: (integer) width, not a tensor
Returns:
A tensor of shape [height, width, 2] where each entry gives the (x,y)
texture coordinates of the corresponding pixel center. For example, for
pixel_center_grid(2, 3) the result is:
[[[1/6, 1/4], [3/6, 1/4], [5/6, 1/4]],
[[1/6, 3/4], [3/6, 3/4], [5/6, 3/4]]]
"""
height_float = tf.cast(height, dtype=tf.float32)
width_float = tf.cast(width, dtype=tf.float32)
ys = tf.linspace(0.5 / height_float, 1.0 - 0.5 / height_float, height)
xs = tf.linspace(0.5 / width_float, 1.0 - 0.5 / width_float, width)
xs, ys = tf.meshgrid(xs, ys)
grid = tf.stack([xs, ys], axis=-1)
assert grid.shape.as_list() == [height, width, 2]
return grid
@utils.name_scope
def camera_rays(intrinsics, height, width):
"""A tensor of rays from the camera to the plane at z=1, one per pixel.
Args:
intrinsics: [..., 4] camera intrinsics
height: output height in pixels
width: output width in pixels
Returns:
[..., H, W, 3] A grid of H x W rays. Each ray is a vector (x, y, 1) in
camera space. For example, for a pixel at the principal point, the
corresponding ray is (0, 0, 1).
"""
coords = pixel_center_grid(height, width)
intrinsics = intrinsics[Ellipsis, tf.newaxis, tf.newaxis, :]
rays = texture_to_camera_coordinates(coords, intrinsics)
return rays
@utils.name_scope
def clip_texture_coords_to_corner_pixels(coords, height, width):
"""Clip texture coordinates to the centers of the corner pixels."""
min_x = 0.5 / width
min_y = 0.5 / height
max_x = 1.0 - min_x
max_y = 1.0 - min_y
return tf.clip_by_value(coords, [min_x, min_y], [max_x, max_y])
@utils.name_scope
def sample_image(image, coords, clamp=True):
"""Sample points from an image, using bilinear filtering.
Args:
image: [B0, ..., Bn-1, height, width, channels] image data
coords: [B0, ..., Bn-1, ..., 2] (x,y) texture coordinates
clamp: if True, coordinates are clamped to the coordinates of the corner
pixels -- i.e. minimum value 0.5/width, 0.5/height and maximum value
1.0-0.5/width or 1.0-0.5/height. This is equivalent to extending the image
in all directions by copying its edge pixels. If False, sampling values
outside the image will return 0 values.
Returns:
[B0, ..., Bn-1, ..., channels] image data, in which each value is sampled
with bilinear interpolation from the image at position indicated by the
(x,y) texture coordinates. The image and coords parameters must have
matching batch dimensions B0, ..., Bn-1.
Raises:
ValueError: if shapes are incompatible.
"""
check_input_shape('coords', coords, -1, 2)
tfshape = tf.shape(image)[-3:-1]
height = tf.cast(tfshape[0], dtype=tf.float32)
width = tf.cast(tfshape[1], dtype=tf.float32)
if clamp:
coords = clip_texture_coords_to_corner_pixels(coords, height, width)
# Resampler expects coordinates where (0,0) is the center of the top-left
# pixel and (width-1, height-1) is the center of the bottom-right pixel.
pixel_coords = coords * [width, height] - 0.5
# tfa_image.resampler only works with exactly one batch dimension, i.e. it
# expects image to be [batch, height, width, channels] and pixel_coords to be
# [batch, ..., 2]. So we need to reshape, perform the resampling, and then
# reshape back to what we had.
batch_dims = len(image.shape.as_list()) - 3
assert (image.shape.as_list()[:batch_dims] == pixel_coords.shape.as_list()
[:batch_dims])
batched_image, _ = utils.flatten_batch(image, batch_dims)
batched_coords, unflatten_coords = utils.flatten_batch(
pixel_coords, batch_dims)
resampled = tfa_image.resampler(batched_image, batched_coords)
# Convert back to the right shape to return
resampled = unflatten_coords(resampled)
return resampled
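# Minimal usage sketch (shapes are illustrative): sampling the centre of a
# single-image batch returns one value per channel.
#
#   image = tf.zeros([1, 480, 640, 3])    # [batch, height, width, channels]
#   coords = tf.constant([[[0.5, 0.5]]])  # [batch, 1, 2] texture coordinates
#   sample_image(image, coords)           # -> shape [1, 1, 3]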
# ========== WARPS AND HOMOGRAPHIES ==========
@utils.name_scope
def inverse_homography(source_pose, source_intrinsics, target_pose,
target_intrinsics, plane):
"""Compute inverse homography from source to target.
This function computes a matrix H which relates the image of the plane P
in the source and target cameras by matrix multiplication as follows:
(source_u, source_v, source_w) = H (target_u, target_v, target_w)
where (u, v, w) are the homogeneous coordinates of the point in the
image-spaces of the source and target cameras.
The plane P is specified as a normal vector (plane[0:3]) in the source
camera-space plus an offset (plane[3]). A point p in source-camera-space
is in the plane when (p_x, p_y, p_z, 1) . P == 0.
Args:
source_pose: [..., 3, 4] source camera pose
source_intrinsics: [..., 4] last dimension (fx, fy, cx, cy)
target_pose: [..., 3, 4] target camera pose
target_intrinsics: [..., 4] last dimension (fx, fy, cx, cy)
plane: [..., 4] The plane P.
Returns:
[..., 3, 3] Homography matrix H.
"""
target_to_source_pose = mat34_product(source_pose,
mat34_pose_inverse(target_pose))
rotation, translation = tf.split(target_to_source_pose, [3, 1], axis=-1)
plane_normal = plane[Ellipsis, tf.newaxis, :3]
plane_offset = plane[Ellipsis, tf.newaxis, 3:]
# Everything now has 2 final dimensions for matrix operations, i.e.
# rotation [..., 3, 3] from target to source
# translation [..., 3, 1] from target to source, in source space
# plane_normal [..., 1, 3] in source space
# plane_offset [..., 1, 1] in source space
denominator = broadcasting_matmul(plane_normal, translation) + plane_offset
numerator = broadcasting_matmul(
broadcasting_matmul(-translation, plane_normal), rotation)
return broadcasting_matmul(
intrinsics_matrix(source_intrinsics),
broadcasting_matmul(rotation + tf.divide(numerator, denominator),
inverse_intrinsics_matrix(target_intrinsics)))
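# A typical use of the homography above, sketched with placeholder tensors
# (source_image, the two poses and the two intrinsics are assumed to exist with
# the shapes documented in this file): warp a source image into the target view
# for the plane z = 1 in source camera space.
#
#   plane = tf.constant([0., 0., 1., -1.])   # points with z == 1 satisfy p . x == 0
#   h = inverse_homography(source_pose, source_intrinsics,
#                          target_pose, target_intrinsics, plane)
#   warped = homography_warp(source_image, h)   # homography_warp is defined below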
@utils.name_scope
def apply_homography(homography, coords):
"""Transform grid of (x,y) texture coordinates by a homography.
Args:
homography: [..., 3, 3]
coords: [..., H, W, 2] (x,y) texture coordinates
Returns:
[..., H, W, 2] transformed coordinates.
"""
height = tf.shape(coords)[-3]
coords = homogenize(utils.collapse_dim(coords, -2)) # [..., H*W, 3]
# Instead of transposing the coords, transpose the homography and
# swap the order of multiplication.
coords = broadcasting_matmul(coords, homography, transpose_b=True)
# coords is now [..., H*W, 3]
return utils.split_dim(dehomogenize(coords), -2, height)
@utils.name_scope
def homography_warp(image, homography, height=None, width=None, clamp=True):
"""Warp an image according to an inverse homography.
Args:
image: [..., H, W, C] input image
homography: [..., 3, 3] homography mapping output to input
height: desired output height (or None to use input height)
width: desired output width (or None to use input width)
clamp: whether to clamp image coordinates (see sample_image doc)
Returns:
[..., height, width, C] warped image.
"""
(image, homography) = utils.broadcast_to_match(
image, homography, ignore_axes=(3, 2))
if height is None:
height = image.shape.as_list()[-3]
if width is None:
width = image.shape.as_list()[-2]
target_coords = pixel_center_grid(height, width)
source_coords = apply_homography(homography, target_coords)
return sample_image(image, source_coords, clamp=clamp)
| apache-2.0 | 6,051,238,215,533,420,000 | 33.343152 | 81 | 0.65869 | false |
mgerstner/backintime | common/logger.py | 1 | 3763 | # Back In Time
# Copyright (C) 2008-2017 Oprea Dan, Bart de Koning, Richard Bailey, Germar Reitze
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import syslog
import os
import sys
import atexit
import tools
import bcolors
DEBUG = False
APP_NAME = 'backintime'
def openlog():
name = os.getenv('LOGNAME', 'unknown')
syslog.openlog("%s (%s/1)" %(APP_NAME, name))
atexit.register(closelog)
def changeProfile(profile_id):
name = os.getenv('LOGNAME', 'unknown')
syslog.openlog("%s (%s/%s)" %(APP_NAME, name, profile_id))
def closelog():
syslog.closelog()
def error(msg , parent = None, traceDepth = 0):
if DEBUG:
msg = '%s %s' %(_debugHeader(parent, traceDepth), msg)
print('%sERROR%s: %s' %(bcolors.FAIL, bcolors.ENDC, msg), file=sys.stderr)
for line in tools.wrapLine(msg):
syslog.syslog(syslog.LOG_ERR, 'ERROR: ' + line)
def warning(msg , parent = None, traceDepth = 0):
if DEBUG:
msg = '%s %s' %(_debugHeader(parent, traceDepth), msg)
print('%sWARNING%s: %s' %(bcolors.WARNING, bcolors.ENDC, msg), file=sys.stderr)
for line in tools.wrapLine(msg):
syslog.syslog(syslog.LOG_WARNING, 'WARNING: ' + line)
def info(msg , parent = None, traceDepth = 0):
if DEBUG:
msg = '%s %s' %(_debugHeader(parent, traceDepth), msg)
print('%sINFO%s: %s' %(bcolors.OKGREEN, bcolors.ENDC, msg), file=sys.stdout)
for line in tools.wrapLine(msg):
syslog.syslog(syslog.LOG_INFO, 'INFO: ' + line)
def debug(msg, parent = None, traceDepth = 0):
if DEBUG:
msg = '%s %s' %(_debugHeader(parent, traceDepth), msg)
print('%sDEBUG%s: %s' %(bcolors.OKBLUE, bcolors.ENDC, msg), file = sys.stdout)
for line in tools.wrapLine(msg):
syslog.syslog(syslog.LOG_DEBUG, 'DEBUG: %s' %line)
def deprecated(parent = None):
frame = sys._getframe(1)
fdir, fname = os.path.split(frame.f_code.co_filename)
fmodule = os.path.basename(fdir)
line = frame.f_lineno
if parent:
fclass = '%s.' %parent.__class__.__name__
else:
fclass = ''
func = frame.f_code.co_name
frameCaller = sys._getframe(2)
fdirCaller, fnameCaller = os.path.split(frameCaller.f_code.co_filename)
fmoduleCaller = os.path.basename(fdirCaller)
lineCaller = frameCaller.f_lineno
msg = '%s/%s:%s %s%s called from ' %(fmodule, fname, line, fclass, func)
msgCaller = '%s/%s:%s' %(fmoduleCaller, fnameCaller, lineCaller)
print('%sDEPRECATED%s: %s%s%s%s' %(bcolors.WARNING, bcolors.ENDC, msg, bcolors.OKBLUE, msgCaller, bcolors.ENDC), file=sys.stderr)
syslog.syslog(syslog.LOG_WARNING, 'DEPRECATED: %s%s' %(msg, msgCaller))
def _debugHeader(parent, traceDepth):
frame = sys._getframe(2 + traceDepth)
fdir, fname = os.path.split(frame.f_code.co_filename)
fmodule = os.path.basename(fdir)
line = frame.f_lineno
if parent:
fclass = '%s.' %parent.__class__.__name__
else:
fclass = ''
func = frame.f_code.co_name
return '[%s/%s:%s %s%s]' %(fmodule, fname, line, fclass, func)
| gpl-2.0 | 4,494,096,716,557,691,400 | 36.257426 | 133 | 0.652405 | false |
vincentadam87/gatsby-hackathon-seizure | code/python/seizures/examples/cross_validation_test.py | 1 | 5173 | '''
Created on 10 August 2014
@author: vincent
'''
# Loading necessary packages
import numpy as np
import sys
from seizures.data.DataLoader_v2 import DataLoader
from seizures.evaluation.XValidation import XValidation
from seizures.evaluation.performance_measures import accuracy, auc
from seizures.features.FeatureExtractBase import FeatureExtractBase
from seizures.features.MixFeatures import MixFeatures
from seizures.features.SEFeatures import SEFeatures
from seizures.features.StatsFeatures import StatsFeatures
from seizures.features.PLVFeatures import PLVFeatures
from seizures.features.ARFeatures import ARFeatures
from seizures.features.LyapunovFeatures import LyapunovFeatures
from seizures.prediction.ForestPredictor import ForestPredictor
from seizures.prediction.SVMPredictor import SVMPredictor
from seizures.prediction.XtraTreesPredictor import XtraTreesPredictor
from seizures.Global import Global
from sklearn.cross_validation import train_test_split
def Xval_on_single_patient(predictor_cls, feature_extractor, patient_name="Dog_1",preprocess=True):
"""
Single patient cross validation
Returns 2 lists of cross validation performances
    :param predictor_cls: a PredictorBase implementation (class, instantiated below)
    :param feature_extractor: a FeatureExtractBase instance
    :param patient_name: subject string, e.g. 'Dog_1'
    :return: (result_seizure, result_early) lists of per-fold AUC scores
"""
# predictor_cls is a handle to an instance of PredictorBase
# Instantiate the predictor
predictor = predictor_cls()
    base_dir = Global.path_map('clips_folder')
    # NOTE: the hard-coded cluster path below overrides the configured clips folder
    base_dir = '/nfs/data3/kaggle_seizure/clips/'
loader = DataLoader(base_dir, feature_extractor)
X_list,y_seizure, y_early = loader.blocks_for_Xvalidation(patient_name,preprocess=preprocess)
#X_train,y_seizure, y_early = loader.training_data(patient_name)
#y_train = [y_seizure,y_early]
#X_list,y_list = train_test_split(X_train,y_train)
# running cross validation
print patient_name
print "\ncross validation: seizures vs not"
result_seizure = XValidation.evaluate(X_list, y_seizure, predictor, evaluation=auc)
print 'cross-validation results: mean = %.3f, sd = %.3f, raw scores = %s' \
% (np.mean(result_seizure), np.std(result_seizure), result_seizure)
print "\ncross validation: early_vs_not"
result_early = XValidation.evaluate(X_list, y_early, predictor, evaluation=auc)
print 'cross-validation results: mean = %.3f, sd = %.3f, raw scores = %s' \
% (np.mean(result_early), np.std(result_early), result_early)
return result_seizure,result_early
def Xval_on_patients(predictor_cls, feature_extractor, patients_list=['Dog_1'],preprocess=True):
''' Runs cross validation for given predictor class and feature instance on the given list of patients
INPUT:
    - predictor_cls: a Predictor class (an implementation of PredictorBase)
    - feature_extractor: an instance of a FeatureExtractBase subclass
- patients_list: a list of subject strings e.g., ['Dog_1', 'Patient_2']
'''
assert(isinstance(feature_extractor, FeatureExtractBase))
results_seizure = []
results_early = []
for patient_name in patients_list:
result_seizure, result_early = Xval_on_single_patient(predictor_cls, feature_extractor, patient_name, preprocess=preprocess)
results_seizure.append(result_seizure)
results_early.append(result_early)
avg_results_seizure = np.mean(np.array(results_seizure),axis=0)
avg_results_early = np.mean(np.array(results_early),axis=0)
print "\ncross validation: seizures vs not (ACROSS ALL SUBJECTS)"
print 'cross-validation results: mean = %.3f, sd = %.3f, raw scores = %s' \
% (np.mean(avg_results_seizure), np.std(avg_results_seizure), avg_results_seizure)
print "\ncross validation: early_vs_not (ACROSS ALL SUBJECTS)"
print 'cross-validation results: mean = %.3f, sd = %.3f, raw scores = %s' \
% (np.mean(avg_results_early), np.std(avg_results_early), avg_results_early)
return avg_results_seizure, avg_results_early
# generate prediction for test data
def main():
# code run at script launch
#patient_name = sys.argv[1]
# There are Dog_[1-4] and Patient_[1-8]
patients_list = ["Dog_%d" % i for i in range(1, 5)] + ["Patient_%d" % i for i in range(1, 9)]
patients_list = ["Dog_%d" % i for i in [1]] #["Patient_%d" % i for i in range(1, 9)]#++
#feature_extractor = MixFeatures([{'name':"ARFeatures",'args':{}}])
#feature_extractor = PLVFeatures()
#feature_extractor = MixFeatures([{'name':"PLVFeatures",'args':{}},{'name':"ARFeatures",'args':{}}])
#feature_extractor = ARFeatures()
feature_extractor = MixFeatures([{'name':"ARFeatures",'args':{}},{'name':"PLVFeatures",'args':{}},{'name':'SEFeatures','args':{}}])
#feature_extractor = SEFeatures()
#feature_extractor = LyapunovFeatures()
#feature_extractor = StatsFeatures()
preprocess = True
predictor = SVMPredictor
#predictor = XtraTreesPredictor
if preprocess==True:
print 'Preprocessing ON'
else:
print 'Preprocessing OFF'
print 'predictor: ',predictor
Xval_on_patients(predictor,feature_extractor, patients_list,preprocess=preprocess)
if __name__ == '__main__':
main()
| bsd-2-clause | 6,804,000,983,729,179,000 | 38.792308 | 135 | 0.70462 | false |
mfalesni/cfme_tests | cfme/networks/subnet.py | 1 | 7113 | import attr
from navmazing import NavigateToAttribute, NavigateToSibling
from widgetastic.exceptions import NoSuchElementException
from cfme.common import Taggable
from cfme.exceptions import ItemNotFound
from cfme.modeling.base import BaseCollection, BaseEntity, parent_of_type
from cfme.networks.views import SubnetDetailsView, SubnetView, SubnetAddView, SubnetEditView
from cfme.utils import providers, version
from cfme.utils.appliance.implementations.ui import navigator, CFMENavigateStep, navigate_to
from cfme.utils.wait import wait_for
@attr.s
class Subnet(Taggable, BaseEntity):
"""Class representing subnets in sdn"""
in_version = ('5.8', version.LATEST)
category = 'networks'
string_name = 'NetworkSubnet'
quad_name = None
db_types = ['NetworkSubnet']
name = attr.ib()
provider_obj = attr.ib(default=None)
network = attr.ib(default=None)
@property
def exists(self):
try:
navigate_to(self, 'Details')
except (ItemNotFound, NoSuchElementException):
return False
else:
return True
def edit(self, new_name, gateway=None):
"""Edit cloud subnet
Args:
new_name: (str) new name of subnet
gateway: (str) IP of new gateway, for example: 11.11.11.1
"""
view = navigate_to(self, 'Edit')
view.fill({'subnet_name': new_name,
'gateway': gateway})
view.save.click()
view.flash.assert_success_message('Cloud Subnet "{}" updated'.format(new_name))
self.name = new_name
def delete(self):
"""Deletes this subnet"""
view = navigate_to(self, 'Details')
view.toolbar.configuration.item_select('Delete this Cloud Subnet', handle_alert=True)
view.flash.assert_success_message('The selected Cloud Subnet was deleted')
@property
def cloud_tenant(self):
""" Return name of tenant that subnet belongs to"""
view = navigate_to(self, 'Details')
return view.entities.relationships.get_text_of('Cloud tenant')
@property
def cloud_network(self):
""" Return name of network that subnet belongs to"""
view = navigate_to(self, 'Details')
field_name = 'Cloud Network' if self.appliance.version >= '5.9' else 'Cloud network'
return view.entities.relationships.get_text_of(field_name)
@property
def cidr(self):
""" Return subnet's CIDR"""
view = navigate_to(self, 'Details')
field_name = 'CIDR' if self.appliance.version >= '5.9' else 'Cidr'
return view.entities.properties.get_text_of(field_name)
@property
def net_protocol(self):
""" Return subnet's network protocol"""
view = navigate_to(self, 'Details')
return view.entities.properties.get_text_of('Network protocol')
@property
def provider(self):
from cfme.networks.provider import NetworkProvider
return parent_of_type(self, NetworkProvider)
@property
def parent_provider(self):
""" Return object of parent cloud provider """
view = navigate_to(self, 'Details')
field_name = 'Parent Cloud Provider' if self.appliance.version >= '5.9' else 'Parent ems cloud'
provider_name = view.entities.relationships.get_text_of(field_name)
return providers.get_crud_by_name(provider_name)
@property
def network_provider(self):
""" Returns network provider """
# security group collection contains reference to provider
if self.provider:
return self.provider
# otherwise get provider name from ui
view = navigate_to(self, 'Details')
try:
prov_name = view.entities.relationships.get_text_of("Network Manager")
collection = self.appliance.collections.network_provider
return collection.instantiate(name=prov_name)
except ItemNotFound: # BZ 1480577
return None
@property
def zone(self):
view = navigate_to(self, 'Details')
return view.entities.relationships.get_text_of('Zone')
@attr.s
class SubnetCollection(BaseCollection):
""" Collection object for Subnet object
Note: Network providers object are not implemented in mgmt
"""
ENTITY = Subnet
def create(self, name, tenant, provider, network_manager, network_name, cidr, gateway=None):
"""Create subnet
Args:
name: (str) name of the subnet
tenant: (str) name of the tenant to place subnet to
provider: crud object of Openstack cloud provider
network_manager: (str) name of network manager
network_name: (str) name of the network to create subnet under
cidr: (str) CIDR of subnet, for example: 192.168.12.2/24
gateway: (str) gateway of subnet, if None - appliance will set it automatically
Returns: instance of cfme.newtorks.subnet.Subnet
"""
view = navigate_to(self, 'Add')
view.fill({'network_manager': network_manager,
'network': network_name,
'subnet_name': name,
'subnet_cidr': cidr,
'gateway': gateway,
'cloud_tenant': tenant})
view.add.click()
view.flash.assert_success_message('Cloud Subnet "{}" created'.format(name))
subnet = self.instantiate(name, provider, network_name)
# Refresh provider's relationships to have new subnet displayed
wait_for(provider.is_refreshed, func_kwargs=dict(refresh_delta=10), timeout=600)
wait_for(lambda: subnet.exists, timeout=100, fail_func=subnet.browser.refresh)
return subnet
def all(self):
if self.filters.get('parent'):
view = navigate_to(self.filters.get('parent'), 'CloudSubnets')
else:
view = navigate_to(self, 'All')
list_networks_obj = view.entities.get_all(surf_pages=True)
return [self.instantiate(name=p.name) for p in list_networks_obj]
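# Illustrative usage sketch (not part of the original module). The collection
# attribute name and all argument values below are assumptions for the example:
#
#   subnets = appliance.collections.network_subnets
#   subnet = subnets.create(name='test_subnet', tenant='admin', provider=provider,
#                           network_manager='OpenStack Network Manager',
#                           network_name='private_net', cidr='192.168.12.0/24')
#   subnet.edit(new_name='renamed_subnet', gateway='192.168.12.1')
#   subnet.delete()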
@navigator.register(SubnetCollection, 'All')
class All(CFMENavigateStep):
VIEW = SubnetView
prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
def step(self):
self.prerequisite_view.navigation.select('Networks', 'Subnets')
@navigator.register(Subnet, 'Details')
class OpenCloudNetworks(CFMENavigateStep):
VIEW = SubnetDetailsView
prerequisite = NavigateToAttribute('parent', 'All')
def step(self):
self.prerequisite_view.entities.get_entity(name=self.obj.name).click()
@navigator.register(SubnetCollection, 'Add')
class AddSubnet(CFMENavigateStep):
VIEW = SubnetAddView
prerequisite = NavigateToSibling('All')
def step(self):
self.prerequisite_view.toolbar.configuration.item_select('Add a new Cloud Subnet')
@navigator.register(Subnet, 'Edit')
class EditSubnet(CFMENavigateStep):
VIEW = SubnetEditView
prerequisite = NavigateToSibling('Details')
def step(self):
self.prerequisite_view.toolbar.configuration.item_select('Edit this Cloud Subnet')
| gpl-2.0 | 3,661,567,692,700,319,000 | 35.476923 | 103 | 0.654014 | false |
DigiThinkIT/stem | test/integ/descriptor/reader.py | 1 | 18237 | """
Integration tests for stem.descriptor.reader.
"""
import getpass
import os
import signal
import sys
import tarfile
import time
import unittest
import stem.descriptor.reader
import test.runner
from stem.util import system
BASIC_LISTING = """
/tmp 123
/bin/grep 4567
/file with spaces/and \\ stuff 890
"""
my_dir = os.path.dirname(__file__)
DESCRIPTOR_TEST_DATA = os.path.join(my_dir, 'data')
TAR_DESCRIPTORS = None
def _get_processed_files_path():
return test.runner.get_runner().get_test_dir('descriptor_processed_files')
def _make_processed_files_listing(contents):
"""
Writes the given 'processed file' listing to disk, returning the path where
it is located.
"""
test_listing_path = _get_processed_files_path()
test_listing_file = open(test_listing_path, 'w')
test_listing_file.write(contents)
test_listing_file.close()
return test_listing_path
def _get_raw_tar_descriptors():
global TAR_DESCRIPTORS
if not TAR_DESCRIPTORS:
test_path = os.path.join(DESCRIPTOR_TEST_DATA, 'descriptor_archive.tar')
raw_descriptors = []
# TODO: revert to using the 'with' keyword for this when dropping python
# 2.6 support
tar_file = None
try:
tar_file = tarfile.open(test_path)
for tar_entry in tar_file:
if tar_entry.isfile():
entry = tar_file.extractfile(tar_entry)
entry.readline() # strip header
raw_descriptors.append(entry.read().decode('utf-8', 'replace'))
entry.close()
finally:
if tar_file:
tar_file.close()
TAR_DESCRIPTORS = raw_descriptors
return TAR_DESCRIPTORS
class SkipListener:
def __init__(self):
self.results = [] # (path, exception) tuples that we've received
def listener(self, path, exception):
self.results.append((path, exception))
class TestDescriptorReader(unittest.TestCase):
def tearDown(self):
# cleans up 'processed file' listings that we made
test_listing_path = _get_processed_files_path()
if os.path.exists(test_listing_path):
os.remove(test_listing_path)
def test_load_processed_files(self):
"""
Basic sanity test for loading a processed files listing from disk.
"""
test_listing_path = _make_processed_files_listing(BASIC_LISTING)
loaded_listing = stem.descriptor.reader.load_processed_files(test_listing_path)
expected_listing = {
'/tmp': 123,
'/bin/grep': 4567,
'/file with spaces/and \\ stuff': 890,
}
self.assertEquals(expected_listing, loaded_listing)
def test_load_processed_files_missing(self):
"""
Tests the load_processed_files() function with a file that doesn't exist.
"""
self.assertRaises(IOError, stem.descriptor.reader.load_processed_files, '/non-existant/path')
def test_load_processed_files_permissions(self):
"""
Tests the load_processed_files() function with a file that can't be read
due to permissions.
"""
# test relies on being unable to read a file
if getpass.getuser() == 'root':
test.runner.skip(self, '(running as root)')
return
# Skip the test on windows, since you can only set the file's
# read-only flag with os.chmod(). For more information see...
# http://docs.python.org/library/os.html#os.chmod
if system.is_windows():
test.runner.skip(self, '(chmod not functional)')
test_listing_path = _make_processed_files_listing(BASIC_LISTING)
os.chmod(test_listing_path, 0077) # remove read permissions
self.assertRaises(IOError, stem.descriptor.reader.load_processed_files, test_listing_path)
def test_save_processed_files(self):
"""
Basic sanity test for persisting files listings to disk.
"""
initial_listing = {
'/tmp': 123,
'/bin/grep': 4567,
'/file with spaces/and \\ stuff': 890,
}
# saves the initial_listing to a file then reloads it
test_listing_path = _get_processed_files_path()
stem.descriptor.reader.save_processed_files(test_listing_path, initial_listing)
loaded_listing = stem.descriptor.reader.load_processed_files(test_listing_path)
self.assertEquals(initial_listing, loaded_listing)
def test_save_processed_files_malformed(self):
"""
Tests the save_processed_files() function with malformed data.
"""
missing_filename = {'': 123}
relative_filename = {'foobar': 123}
string_timestamp = {'/tmp': '123a'}
for listing in (missing_filename, relative_filename, string_timestamp):
self.assertRaises(TypeError, stem.descriptor.reader.save_processed_files, '/tmp/foo', listing)
    # Though our attempts to save the processed files fail, we'll have written an
    # empty file. Clean it up.
try:
os.remove('/tmp/foo')
except:
pass
def test_basic_example(self):
"""
Exercises something similar to the first example in the header
documentation, checking that some of the contents match what we'd expect.
"""
# snag some of the plaintext descriptors so we can later make sure that we
# iterate over them
descriptor_entries = []
descriptor_path = os.path.join(DESCRIPTOR_TEST_DATA, 'example_descriptor')
with open(descriptor_path) as descriptor_file:
descriptor_file.readline() # strip header
descriptor_entries.append(descriptor_file.read())
# running this test multiple times to flush out concurrency issues
for _ in xrange(15):
remaining_entries = list(descriptor_entries)
with stem.descriptor.reader.DescriptorReader(descriptor_path) as reader:
for descriptor in reader:
descriptor_str = str(descriptor)
if descriptor_str in remaining_entries:
remaining_entries.remove(descriptor_str)
else:
# iterator is providing output that we didn't expect
self.fail()
# check that we've seen all of the descriptor_entries
self.assertTrue(len(remaining_entries) == 0)
def test_multiple_runs(self):
"""
Runs a DescriptorReader instance multiple times over the same content,
making sure that it can be used repeatedly.
"""
descriptor_path = os.path.join(DESCRIPTOR_TEST_DATA, 'example_descriptor')
reader = stem.descriptor.reader.DescriptorReader(descriptor_path)
with reader:
self.assertEquals(1, len(list(reader)))
# run it a second time, this shouldn't provide any descriptors because we
# have already read it
with reader:
self.assertEquals(0, len(list(reader)))
# clear the DescriptorReader's memory of seeing the file and run it again
reader.set_processed_files([])
with reader:
self.assertEquals(1, len(list(reader)))
def test_buffer_size(self):
"""
Checks that we can process sets of descriptors larger than our buffer size,
that we don't exceed it, and that we can still stop midway through reading
them.
"""
reader = stem.descriptor.reader.DescriptorReader(DESCRIPTOR_TEST_DATA, buffer_size = 2)
with reader:
self.assertTrue(reader.get_buffered_descriptor_count() <= 2)
time.sleep(0.01)
self.assertTrue(reader.get_buffered_descriptor_count() <= 2)
def test_persistence_path(self):
"""
    Check that the persistence_path argument loads and saves a processed
files listing.
"""
persistence_path = _get_processed_files_path()
descriptor_path = os.path.join(DESCRIPTOR_TEST_DATA, 'example_descriptor')
# First run where the persistence_path doesn't yet exist. This just tests
# the saving functionality.
reader = stem.descriptor.reader.DescriptorReader(descriptor_path, persistence_path = persistence_path)
with reader:
self.assertEqual(1, len(list(reader)))
# check that we've saved reading example_descriptor
self.assertTrue(os.path.exists(persistence_path))
with open(persistence_path) as persistence_file:
      persistence_file_contents = persistence_file.read()
      self.assertTrue(persistence_file_contents.startswith(descriptor_path))
    # Try running again with a new reader but the same persistence path. If it
    # reads and takes the persistence_path into account then it won't read the
    # descriptor file. This in essence just tests its loading functionality.
reader = stem.descriptor.reader.DescriptorReader(descriptor_path, persistence_path = persistence_path)
with reader:
self.assertEqual(0, len(list(reader)))
def test_archived_paths(self):
"""
Checks the get_path() and get_archive_path() for a tarball.
"""
expected_archive_paths = (
'descriptor_archive/0/2/02c311d3d789f3f55c0880b5c85f3c196343552c',
'descriptor_archive/1/b/1bb798cae15e21479db0bc700767eee4733e9d4a',
'descriptor_archive/1/b/1ef75fef564180d8b3f72c6f8635ff0cd855f92c',
)
test_path = os.path.join(DESCRIPTOR_TEST_DATA, 'descriptor_archive.tar')
with stem.descriptor.reader.DescriptorReader(test_path) as reader:
for desc in reader:
self.assertEqual(test_path, desc.get_path())
self.assertTrue(desc.get_archive_path() in expected_archive_paths)
def test_archived_uncompressed(self):
"""
Checks that we can read descriptors from an uncompressed archive.
"""
expected_results = _get_raw_tar_descriptors()
test_path = os.path.join(DESCRIPTOR_TEST_DATA, 'descriptor_archive.tar')
with stem.descriptor.reader.DescriptorReader(test_path) as reader:
read_descriptors = [str(desc) for desc in list(reader)]
self.assertEquals(expected_results, read_descriptors)
def test_archived_gzip(self):
"""
Checks that we can read descriptors from a gzipped archive.
"""
expected_results = _get_raw_tar_descriptors()
test_path = os.path.join(DESCRIPTOR_TEST_DATA, 'descriptor_archive.tar.gz')
with stem.descriptor.reader.DescriptorReader(test_path) as reader:
read_descriptors = [str(desc) for desc in list(reader)]
self.assertEquals(expected_results, read_descriptors)
def test_archived_bz2(self):
"""
Checks that we can read descriptors from an bzipped archive.
"""
expected_results = _get_raw_tar_descriptors()
test_path = os.path.join(DESCRIPTOR_TEST_DATA, 'descriptor_archive.tar.bz2')
with stem.descriptor.reader.DescriptorReader(test_path) as reader:
read_descriptors = [str(desc) for desc in list(reader)]
self.assertEquals(expected_results, read_descriptors)
def test_stop(self):
"""
Runs a DescriptorReader over the root directory, then checks that calling
stop() makes it terminate in a timely fashion.
"""
# Skip on windows since SIGALRM is unavailable
if system.is_windows():
test.runner.skip(self, '(SIGALRM unavailable)')
is_test_running = True
reader = stem.descriptor.reader.DescriptorReader('/usr')
# Fails the test after a couple seconds if we don't finish successfully.
# Depending on what we're blocked on this might not work when the test
# fails, requiring that we give a manual kill to the test.
def timeout_handler(signum, frame):
if is_test_running:
self.fail()
signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(2)
reader.start()
time.sleep(0.1)
reader.stop()
is_test_running = False
def test_get_processed_files(self):
"""
Checks that get_processed_files() provides the expected results after
iterating over our test data.
"""
expected_results = {}
for root, _, files in os.walk(DESCRIPTOR_TEST_DATA):
for filename in files:
path = os.path.join(root, filename)
last_modified = int(os.stat(path).st_mtime)
expected_results[path] = last_modified
reader = stem.descriptor.reader.DescriptorReader(DESCRIPTOR_TEST_DATA)
with reader:
list(reader) # iterates over all of the descriptors
self.assertEquals(expected_results, reader.get_processed_files())
def test_skip_nondescriptor_contents(self):
"""
Checks that the reader properly reports when it skips both binary and
plaintext non-descriptor files.
"""
skip_listener = SkipListener()
reader = stem.descriptor.reader.DescriptorReader(DESCRIPTOR_TEST_DATA)
reader.register_skip_listener(skip_listener.listener)
expected_skip_files = ('riddle', 'tiny.png', 'vote', 'new_metrics_type')
with reader:
list(reader) # iterates over all of the descriptors
# strip anything with a .swp suffix (vim tmp files)
skip_listener.results = [(path, exc) for (path, exc) in skip_listener.results if not path.endswith('.swp')]
if len(skip_listener.results) != len(expected_skip_files):
expected_label = ',\n '.join(expected_skip_files)
results_label = ',\n '.join(['%s (%s)' % (path, exc) for (path, exc) in skip_listener.results])
self.fail('Skipped files that we should have been able to parse.\n\nExpected:\n %s\n\nResult:\n %s' % (expected_label, results_label))
for skip_path, skip_exception in skip_listener.results:
if not os.path.basename(skip_path) in expected_skip_files:
self.fail('Unexpected non-descriptor content: %s' % skip_path)
self.assertTrue(isinstance(skip_exception, stem.descriptor.reader.UnrecognizedType))
def test_skip_listener_already_read(self):
"""
Checks that calling set_processed_files() prior to reading makes us skip
those files. This also doubles for testing that skip listeners are notified
of files that we've already read.
"""
# path that we want the DescriptorReader to skip
test_path = os.path.join(DESCRIPTOR_TEST_DATA, 'example_descriptor')
initial_processed_files = {test_path: sys.maxint}
skip_listener = SkipListener()
reader = stem.descriptor.reader.DescriptorReader(test_path)
reader.register_skip_listener(skip_listener.listener)
reader.set_processed_files(initial_processed_files)
self.assertEquals(initial_processed_files, reader.get_processed_files())
with reader:
list(reader) # iterates over all of the descriptors
self.assertEquals(1, len(skip_listener.results))
skipped_path, skip_exception = skip_listener.results[0]
self.assertEqual(test_path, skipped_path)
self.assertTrue(isinstance(skip_exception, stem.descriptor.reader.AlreadyRead))
self.assertEqual(sys.maxint, skip_exception.last_modified_when_read)
def test_skip_listener_unrecognized_type(self):
"""
Listens for a file that's skipped because its file type isn't recognized.
"""
    # types are solely based on file extensions, so make something that looks
    # like a png image
test_path = test.runner.get_runner().get_test_dir('test.png')
try:
test_file = open(test_path, 'w')
test_file.write('test data for test_skip_listener_unrecognized_type()')
test_file.close()
skip_listener = SkipListener()
reader = stem.descriptor.reader.DescriptorReader(test_path)
reader.register_skip_listener(skip_listener.listener)
with reader:
list(reader) # iterates over all of the descriptors
self.assertEqual(1, len(skip_listener.results))
skipped_path, skip_exception = skip_listener.results[0]
self.assertEqual(test_path, skipped_path)
self.assertTrue(isinstance(skip_exception, stem.descriptor.reader.UnrecognizedType))
self.assertTrue(skip_exception.mime_type in (('image/png', None), ('image/x-png', None)))
finally:
if os.path.exists(test_path):
os.remove(test_path)
def test_skip_listener_read_failure(self):
"""
Listens for a file that's skipped because we lack read permissions.
"""
# test relies on being unable to read a file
if getpass.getuser() == 'root':
test.runner.skip(self, '(running as root)')
return
elif system.is_windows():
test.runner.skip(self, '(chmod not functional)')
return
test_path = test.runner.get_runner().get_test_dir('secret_file')
try:
test_file = open(test_path, 'w')
      test_file.write('test data for test_skip_listener_read_failure()')
test_file.close()
os.chmod(test_path, 0077) # remove read permissions
skip_listener = SkipListener()
reader = stem.descriptor.reader.DescriptorReader(test_path)
reader.register_skip_listener(skip_listener.listener)
with reader:
list(reader) # iterates over all of the descriptors
self.assertEqual(1, len(skip_listener.results))
skipped_path, skip_exception = skip_listener.results[0]
self.assertEqual(test_path, skipped_path)
self.assertTrue(isinstance(skip_exception, stem.descriptor.reader.ReadFailed))
self.assertTrue(isinstance(skip_exception.exception, IOError))
finally:
if os.path.exists(test_path):
os.remove(test_path)
def test_skip_listener_file_missing(self):
"""
Listens for a file that's skipped because the file doesn't exist.
"""
test_path = '/non-existant/path'
skip_listener = SkipListener()
reader = stem.descriptor.reader.DescriptorReader(test_path)
reader.register_skip_listener(skip_listener.listener)
with reader:
list(reader) # iterates over all of the descriptors
self.assertEqual(1, len(skip_listener.results))
skipped_path, skip_exception = skip_listener.results[0]
self.assertEqual(test_path, skipped_path)
self.assertTrue(isinstance(skip_exception, stem.descriptor.reader.FileMissing))
def test_unrecognized_metrics_type(self):
"""
Parses a file that has a valid metrics header, but an unrecognized type.
"""
test_path = test.integ.descriptor.get_resource('new_metrics_type')
skip_listener = SkipListener()
reader = stem.descriptor.reader.DescriptorReader(test_path)
reader.register_skip_listener(skip_listener.listener)
with reader:
list(reader) # iterates over all of the descriptors
self.assertEqual(1, len(skip_listener.results))
skipped_path, skip_exception = skip_listener.results[0]
self.assertEqual(test_path, skipped_path)
self.assertTrue(isinstance(skip_exception, stem.descriptor.reader.UnrecognizedType))
self.assertEqual((None, None), skip_exception.mime_type)
| lgpl-3.0 | -611,135,739,112,700,200 | 31.450178 | 142 | 0.692767 | false |
windelbouwman/ppci-mirror | test/wasm/test_instruction.py | 1 | 3046 | """
Basic instruction tests, like nesting.
"""
from ppci.wasm import Module, run_wasm_in_node, has_node
def dedent(code):
    # Strip the 4-space indentation used by the triple-quoted code blocks below.
    return '\n'.join(line[4:] for line in code.splitlines()).strip() + '\n'
def test_instructions1():
""" Test canoncocal form of import and func and inline typedefs.
"""
# The canonical form
CODE0 = dedent("""
(module
(type $print (func (param i32)))
(type $2 (func))
(import "js" "print_ln" (func $print (type $print)))
(start $main)
(func $main (type $2)
i32.const 1
if
i32.const 4
i32.const 3
i32.add
call $print
else
i32.const 5
call $print
end)
)
""")
# Test main code
m0 = Module(CODE0)
assert m0.to_string() == CODE0
b0 = m0.to_bytes()
assert Module(b0).to_bytes() == b0
if has_node():
assert run_wasm_in_node(m0, True) == '7'
    # Variant 1 - no indentation, nested test for if
CODE1 = dedent("""
(module
(type $print (func (param i32)))
(type $2 (func))
(import "js" "print_ln" (func $print (type $print)))
(start $main)
(func $main (type $2)
(i32.const 1)
if
(i32.const 4)
(i32.const 3)
(i32.add)
(call $print)
else
(i32.const 5)
(call $print)
end
)
)
""")
m1 = Module(CODE1)
assert m1.to_string() == CODE0
assert m1.to_bytes() == b0
# Variant 2 - nesting all the way
CODE2 = dedent("""
(module
(type $print (func (param i32)))
(type $2 (func))
(import "js" "print_ln" (func $print (type $print)))
(start $main)
(func $main (type $2)
(if (i32.const 1)
(then
(i32.const 4)
(i32.const 3)
(i32.add)
(call $print)
)
(else
(i32.const 5)
(call $print)
)
)
)
)
""")
m2 = Module(CODE2)
assert m2.to_string() == CODE0
assert m2.to_bytes() == b0
# Variant 3 - leave out the else clause
# This is described as an "abbreviation", but it seems that we don't
# have to always output an else clause in binary form either.
CODE3 = dedent("""
(module
(type $print (func (param i32)))
(type $2 (func))
(import "js" "print_ln" (func $print (type $print)))
(start $main)
(func $main (type $2)
(if (i32.const 1)
(then
(i32.const 4)
(i32.const 3)
(i32.add)
(call $print)
)
)
)
)
""")
m3 = Module(CODE3)
assert m3.to_string() != CODE0
assert m3.to_bytes() != b0
if has_node():
assert run_wasm_in_node(m3, True) == '7'
if __name__ == '__main__':
test_instructions1()
| bsd-2-clause | -6,786,285,895,563,316,000 | 22.430769 | 75 | 0.459947 | false |
elena/django | django/conf/global_settings.py | 6 | 22349 | """
Default Django settings. Override these with settings in the module pointed to
by the DJANGO_SETTINGS_MODULE environment variable.
"""
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
def gettext_noop(s):
return s
####################
# CORE #
####################
DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# People who get code error notifications.
# In the format [('Full Name', '[email protected]'), ('Full Name', '[email protected]')]
ADMINS = []
# List of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = []
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = [
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('ar-dz', gettext_noop('Algerian Arabic')),
('ast', gettext_noop('Asturian')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('dsb', gettext_noop('Lower Sorbian')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-au', gettext_noop('Australian English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-co', gettext_noop('Colombian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gd', gettext_noop('Scottish Gaelic')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hsb', gettext_noop('Upper Sorbian')),
('hu', gettext_noop('Hungarian')),
('hy', gettext_noop('Armenian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('ig', gettext_noop('Igbo')),
('io', gettext_noop('Ido')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kab', gettext_noop('Kabyle')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('ky', gettext_noop('Kyrgyz')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('mr', gettext_noop('Marathi')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmål')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('tg', gettext_noop('Tajik')),
('th', gettext_noop('Thai')),
('tk', gettext_noop('Turkmen')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('uz', gettext_noop('Uzbek')),
('vi', gettext_noop('Vietnamese')),
('zh-hans', gettext_noop('Simplified Chinese')),
('zh-hant', gettext_noop('Traditional Chinese')),
]
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "ar-dz", "fa", "ur"]
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = []
# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
LANGUAGE_COOKIE_SECURE = False
LANGUAGE_COOKIE_HTTPONLY = False
LANGUAGE_COOKIE_SAMESITE = None
# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default charset to use for all HttpResponse objects, if a MIME type isn't
# manually specified. It's used to construct the Content-Type header.
DEFAULT_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
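# Illustrative sketch only (not a default): a minimal SQLite configuration in a
# project's own settings module might look like this; the file name is arbitrary.
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': 'db.sqlite3',
#     }
# }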
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
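# For example, a project that only wants to print outgoing messages to stdout
# during development could point this at the bundled console backend:
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'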
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Whether to send SMTP 'Date' header in the local time zone or in UTC.
EMAIL_USE_LOCALTIME = False
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None
# List of strings representing installed apps.
INSTALLED_APPS = []
TEMPLATES = []
# Default form rendering class.
FORM_RENDERER = 'django.forms.renderers.DjangoTemplates'
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages sent with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = [
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search'),
# ]
DISALLOWED_USER_AGENTS = []
ABSOLUTE_URL_OVERRIDES = {}
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = [
# re.compile(r'^/apple-touch-icon.*\.png$'),
# re.compile(r'^/favicon.ico$'),
# re.compile(r'^/robots.txt$'),
# re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# ]
IGNORABLE_404_URLS = []
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = [
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum size in bytes of request data (excluding file uploads) that will be
# read before a SuspiciousOperation (RequestDataTooBig) is raised.
DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum number of GET/POST parameters that will be read before a
# SuspiciousOperation (TooManyFieldsSent) is raised.
DATA_UPLOAD_MAX_NUMBER_FIELDS = 1000
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_PERMISSIONS = 0o644
# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = [
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
]
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
]
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default primary key field type.
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'DENY'
USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False
# The Python dotted path to the WSGI application that Django's internal server
# (runserver) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
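# For example, a deployment whose proxy sets X-Forwarded-Proto to "https" for
# TLS requests would use (in its own settings, after reading the warning above):
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')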
##############
# MIDDLEWARE #
##############
# List of middleware to use. Order is important; in the request phase, these
# middleware will be applied in the order given, and in the response
# phase the middleware will be applied in reverse order.
MIDDLEWARE = []
############
# SESSIONS #
############
# Cache to store session data if using the cache session backend.
SESSION_CACHE_ALIAS = 'default'
# Cookie name. This can be whatever you want.
SESSION_COOKIE_NAME = 'sessionid'
# Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2
# A string like "example.com", or None for standard domain cookie.
SESSION_COOKIE_DOMAIN = None
# Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_SECURE = False
# The path of the session cookie.
SESSION_COOKIE_PATH = '/'
# Whether to use the HttpOnly flag.
SESSION_COOKIE_HTTPONLY = True
# Whether to set the flag restricting cookie leaks on cross-site requests.
# This can be 'Lax', 'Strict', 'None', or False to disable the flag.
SESSION_COOKIE_SAMESITE = 'Lax'
# Whether to save the session data on every request.
SESSION_SAVE_EVERY_REQUEST = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# The module to store session data
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# Directory to store session files if using the file session module. If None,
# the backend will use a sensible default.
SESSION_FILE_PATH = None
# class to serialize session data
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
LOGOUT_REDIRECT_URL = None
# The number of seconds a password reset link is valid for (default: 3 days).
PASSWORD_RESET_TIMEOUT = 60 * 60 * 24 * 3
# the first hasher in this list is the preferred algorithm. any
# password using different algorithms will be converted automatically
# upon login
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = []
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
CSRF_COOKIE_SAMESITE = 'Lax'
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
CSRF_TRUSTED_ORIGINS = []
CSRF_USE_SESSIONS = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER = 'django.views.debug.ExceptionReporter'
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Apps that don't need to be serialized at test database creation time
# (only apps with migrations are to start with)
TEST_NON_SERIALIZED_APPS = []
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = []
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = []
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
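# For example, to look for the "blog" app's migrations in blog/db_migrations/
# instead of the default blog/migrations/ package:
# MIGRATION_MODULES = {'blog': 'blog.db_migrations'}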
#################
# SYSTEM CHECKS #
#################
# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not result in hiding the
# message, but Django will not stop you from e.g. running server.
SILENCED_SYSTEM_CHECKS = []
#######################
# SECURITY MIDDLEWARE #
#######################
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_PRELOAD = False
SECURE_HSTS_SECONDS = 0
SECURE_REDIRECT_EXEMPT = []
SECURE_REFERRER_POLICY = 'same-origin'
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
| bsd-3-clause | 7,911,465,464,581,487,000 | 33.648062 | 101 | 0.687489 | false |
IMIO/django-fixmystreet | django_fixmystreet/fixmystreet/views/reports/subscribers.py | 1 | 2154 |
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _
from django.contrib import messages
from django.db import IntegrityError
from django_fixmystreet.fixmystreet.models import FMSUser
from django_fixmystreet.fixmystreet.models import Report, ReportSubscription
def create(request, report_id):
report = get_object_or_404(Report, id=report_id)
#CREATE USER CITIZEN IF NECESSARY
try:
user = FMSUser.objects.get(email=request.REQUEST.get('citizen_email'))
except FMSUser.DoesNotExist:
        #Create the citizen user if it does not already exist
user = FMSUser.objects.create(username=request.REQUEST.get('citizen_email'), email=request.REQUEST.get('citizen_email'), first_name='ANONYMOUS', last_name='ANONYMOUS', agent=False, contractor=False, manager=False, leader=False)
#VERIFY THAT A SUBSCRIPTION DOES NOT ALREADY EXIST
if not ReportSubscription.objects.filter(subscriber=user, report=report).exists():
subscriber = ReportSubscription(subscriber=user, report=report)
subscriber.save()
messages.add_message(request, messages.SUCCESS, _("You have subscribed from updates successfully"))
return HttpResponseRedirect(report.get_absolute_url())
def remove(request, report_id):
report = get_object_or_404(Report, id=report_id)
try:
user = FMSUser.objects.get(email=request.REQUEST.get('citizen_email'))
    except FMSUser.DoesNotExist:
        return HttpResponseRedirect(report.get_absolute_url())
    #REMOVE THE SUBSCRIPTION IF IT EXISTS
try:
subscription = ReportSubscription.objects.get(subscriber=user, report=report)
subscription.delete()
messages.add_message(request, messages.SUCCESS, _("You have unsubscribed from updates successfully"))
except ReportSubscription.DoesNotExist:
        #No subscription exists for this user; still report success to the citizen
messages.add_message(request, messages.SUCCESS, _("You have unsubscribed from updates successfully"))
return HttpResponseRedirect(report.get_absolute_url())
| agpl-3.0 | 3,971,464,901,499,527,000 | 42.959184 | 235 | 0.749304 | false |
8l/beri | cheritest/trunk/tests/branch/test_raw_bltzl_lt_back.py | 2 | 1732 | #-
# Copyright (c) 2011 Robert N. M. Watson
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
class test_raw_bltzl_lt_back(BaseBERITestCase):
def test_before_bltzl(self):
self.assertRegisterEqual(self.MIPS.a0, 1, "instruction before backward bltzl missed")
def test_bltzl_branch_delay(self):
self.assertRegisterEqual(self.MIPS.a1, 2, "instruction in branch-delay slot missed")
def test_bltzl_skipped(self):
self.assertRegisterNotEqual(self.MIPS.a2, 3, "branch didn't happen")
def test_bltzl_target(self):
self.assertRegisterEqual(self.MIPS.a3, 4, "instruction at branch target didn't run")
| apache-2.0 | 7,195,551,675,817,355,000 | 41.243902 | 93 | 0.75 | false |
tharwan/CoopCPS | mpi_result_plot.py | 1 | 2363 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from utilities import *
from image_database import saveFiguesWithData
from sys import argv, exit
from numpy import load
import argparse
import seaborn as sns
from os import path as pathtools
parser = argparse.ArgumentParser(description='process data from cluster')
parser.add_argument('file',help="what file to work on",nargs='*')
parser.add_argument('-m','--metadata',action='store_true',help="print only metadata")
parser.add_argument('-s','--save',action='store_true',help="save images to dropbox")
parser.add_argument('--save_only',action='store_true',help="save images to dropbox, do not show on screen")
parser.add_argument('-p','--save_path',help="override the default save path")
args = parser.parse_args()
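# Example invocation (illustrative only; the file names and target path are made up):
#   python mpi_result_plot.py run_001.npz run_002.npz -s -p ~/Dropbox/plots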
for filename in args.file:
    f = load(filename)
    print filename
    meta = str(f['metadata'])
    meta = meta.replace(';','\n')
    print meta
    if args.metadata:
        exit()

    plt.close("all")
    figs = {}
    fig_k = plt.figure()
    plot_appliances_aggregate(f['appliances'],f['t'])
    figs['appliances']=fig_k
    fig_behaviour = plt.figure(figsize=(12,6))
    matrix = plot_behavior(f['beh_matrix'])
    figs['behavior']=fig_behaviour
    agent_power = plt.figure()
    plot_agent_power(f['P_all'],f['t'][1:])
    figs['agent_power']=agent_power
    overall_power = plt.figure()
    plot_power_usage(f['P_global'],f['t'][1:])
    figs['overall_power']=overall_power
    plt.figure()
    plot_appl_matrix(f['appliances'])
    plt.figure()
    matrix = f['appliances']
    app = downsample(matrix)
    time = downsample(f['t'])
    sns.tsplot(app,time=time, err_style="unit_traces", err_palette=sns.dark_palette("crimson", len(app)), color="k");
    plt.xlabel('time')
    plt.ylabel('app')
    plt.figure()
    s = f['selfish']
    plt.plot(s)
    plt.ylim([0,1])
    plt.xlabel('agent')
    plt.ylabel('selfishness')

    meta = str(f['metadata'])
    meta_dict = {pair.split(':')[0]:pair.split(':')[1] for pair in meta.split(';')}
    P_max = float(meta_dict['U'])**2/float(meta_dict['Ri'])/4
    p_matrix = f['P_all']
    sum_P = np.mean(p_matrix,axis=1)
    p_equal = P_max/float(p_matrix.shape[0])
    print "p_equal", p_equal, "P_max", P_max, "ptp", np.ptp(sum_P-p_equal), "gini",gini_coeff(sum_P)

    if args.save or args.save_only:
        path = args.save_path
        saveFiguesWithData(path, figs, str(f['metadata']),prefix=pathtools.basename(filename)[:-4])

    if not(args.save_only):
        plt.show()
| mit | 3,038,978,000,681,454,000 | 26.16092 | 114 | 0.677529 | false |
skibyte/gdblib | gdblib/gdbstate.py | 1 | 1084 | #
# GdbLib - A Gdb python library.
# Copyright (C) 2012 Fernando Castillo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
class GDBState():

    def __init__(self):
        self.connected = False

    def setCurrentLocation(self,currentfile,currentline):
        self.currentfile = currentfile
        self.currentline = currentline

    def isConnected(self):
        return self.connected

    def setConnected(self, connected):
        self.connected = connected
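# Minimal usage sketch (not part of the library; illustrates the intended call order):
#   state = GDBState()
#   state.setConnected(True)
#   state.setCurrentLocation('main.c', 42)
#   assert state.isConnected()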
| lgpl-3.0 | -217,622,145,809,591,870 | 35.133333 | 77 | 0.72417 | false |
robertsj/poropy | pyqtgraph/examples/GraphicsScene.py | 1 | 1462 | # -*- coding: utf-8 -*-
## Add path to library (just for examples; you do not need this)
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
from pyqtgraph.GraphicsScene import GraphicsScene
app = QtGui.QApplication([])
win = pg.GraphicsView()
win.show()
class Obj(QtGui.QGraphicsObject):
    def __init__(self):
        QtGui.QGraphicsObject.__init__(self)
        GraphicsScene.registerObject(self)

    def paint(self, p, *args):
        p.setPen(pg.mkPen(200,200,200))
        p.drawRect(self.boundingRect())

    def boundingRect(self):
        return QtCore.QRectF(0, 0, 20, 20)

    def mouseClickEvent(self, ev):
        if ev.double():
            print "double click"
        else:
            print "click"
        ev.accept()

    #def mouseDragEvent(self, ev):
        #print "drag"
        #ev.accept()
        #self.setPos(self.pos() + ev.pos()-ev.lastPos())
vb = pg.ViewBox()
win.setCentralItem(vb)
obj = Obj()
vb.addItem(obj)
obj2 = Obj()
win.addItem(obj2)
def clicked():
    print "button click"
btn = QtGui.QPushButton("BTN")
btn.clicked.connect(clicked)
prox = QtGui.QGraphicsProxyWidget()
prox.setWidget(btn)
prox.setPos(100,0)
vb.addItem(prox)
g = pg.GridItem()
vb.addItem(g)
## Start Qt event loop unless running in interactive mode.
if sys.flags.interactive != 1:
    app.exec_()
| mit | 6,903,885,841,460,964,000 | 21.492308 | 71 | 0.620383 | false |
interpss/DeepMachineLearning | ipss.dml/py/c_graph/single_net/predict_voltage5.py | 1 | 3237 | '''
Copyright (C) 2005-17 www.interpss.org
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
Use NN-model to predict the bus voltage for a set of scale-factors
Starting from the predict_voltage1.py case, the following changes are made
- The NN-Model Loadflow method is used
- ieee14-2 case is used, where PV bus limit are set to a very large number
'''
from datetime import datetime
import tensorflow as tf
import sys
sys.path.insert(0, '../..')
import lib.common_func as cf
train_points = 100
#
# load the IEEE-14Bus case
#
filename = 'testdata/ieee14-2.ieee'
noBus, noBranch = cf.ipss_app.loadCase(filename, 'NNLFLoadChangeTrainCaseBuilder')
print(filename, ' loaded, no of Buses, Branches:', noBus, ', ', noBranch)
# define model size
size = noBus * 2
#print('size: ', size)
# define model variables
W1 = tf.Variable(tf.zeros([size,size]))
b1 = tf.Variable(tf.zeros([size]))
init = tf.initialize_all_variables()
# define model
def nn_model(data):
    output = tf.matmul(data, W1) + b1
    return output
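# Note: nn_model is a single affine (linear) layer. For an input batch of shape
# [batch, size] it returns tf.matmul(x, W1) + b1, also of shape [batch, size].
# An illustrative NumPy check that could be run inside the session defined below
# (assumption, not part of the original script):
#   y_hat = np.matmul(train_x, sess.run(W1)) + sess.run(b1)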
# define loss
x = tf.placeholder(tf.float32, [None, size])
y = tf.placeholder(tf.float32)
error = tf.square(nn_model(x) - y)
loss = tf.reduce_sum(error)
# define training optimization
optimizer = tf.train.GradientDescentOptimizer(cf.learning_rate)
train = optimizer.minimize(loss)
# run the computation graph
with tf.Session() as sess :
    sess.run(init)

    # run the training part
    # =====================
    print('Begin training: ', datetime.now())

    # retrieve training set
    trainSet = cf.ipss_app.getTrainSet(train_points)
    train_x, train_y = cf.transfer2PyArrays(trainSet)

    # run the training part
    for i in range(cf.train_steps):
        if (i % 1000 == 0) : print('Training step: ', i)
        sess.run(train, {x:train_x, y:train_y})
    print('End training: ', datetime.now())

    '''
    print('W1: ', sess.run(W1))
    print('b1: ', sess.run(b1))
    '''

    # run the verification part
    # =========================
    # retrieve a test case
    for factor in [0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.55] :
    #for factor in [0.45, 1.0, 1.55] :
        testCase = cf.ipss_app.getTestCase(factor)
        test_x, test_y = cf.transfer2PyArrays(testCase)
        # compute model output (network voltage)
        model_y = sess.run(nn_model(x), {x:test_x})
        #printArray(model_y, 'model_y')
        netVoltage = cf.transfer2JavaDblAry(model_y[0], size)
        print('model out mismatch: ', cf.ipss_app.getMismatchInfo(netVoltage))
| apache-2.0 | -7,357,071,836,146,111,000 | 27.162162 | 87 | 0.623417 | false |
curtisallen/Alarmageddon | setup.py | 1 | 1170 | from setuptools import setup
setup(
name = "Alarmageddon",
description = "Automated testing and reporting",
version = "1.0.0",
author = "Tim Stewart, Scott Hellman",
author_email = "[email protected], [email protected]",
url = "https://github.com/PearsonEducation/Alarmageddon/tarball/1.0.0",
license = "Apache2",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 2 :: Only",
"License :: OSI Approved :: Apache Software License"
],
packages = ['alarmageddon',
"alarmageddon.publishing",
"alarmageddon.validations"],
install_requires = ["fabric==1.8.0",
"Jinja2==2.7.2",
"requests==2.0.0",
"statsd==2.0.3",
"colorama==0.3.2",
"pycrypto==2.6.1",
"pika==0.9.13",
"pytest>=2.4.0",
"pytest-localserver==0.3.2"],
)
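# Typical install/packaging sketch (assumed workflow, not part of setup.py itself):
#   pip install .          # install Alarmageddon and its pinned dependencies
#   python setup.py sdist  # build a source distribution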
| apache-2.0 | 1,686,784,770,361,715,700 | 40.785714 | 80 | 0.461538 | false |
lzw120/django | mysite/mysite/settings.py | 1 | 5565 | # Django settings for mysite project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '/Users/zewenli/git/django/mysite/mysite/books/mydata.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '19xu0m_c&qbwxk@hl1n0um2nvfo&=@jclatjdf!#_z)z-k5s5='
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
# 'django.middleware.common.CommonMiddleware',
# 'django.contrib.sessions.middleware.SessionMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'mysite.wsgi.application'
TEMPLATE_DIRS = (
# a better idea would be like this:
# import os.path
# os.path.join(os.path.dirname(__file__), 'templates').replace('\\','/'),
# but never mind, sicne this is my own small django app
'/Users/zewenli/git/django/mysite//mysite/templates',
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
# 'django.contrib.auth',
# 'django.contrib.contenttypes',
# 'django.contrib.sessions',
# 'django.contrib.sites',
# 'django.contrib.messages',
# 'django.contrib.staticfiles',
'mysite.books',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
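# Illustrative use of the 'django.request' logger configured above
# (sketch only; with this config the handler emails ADMINS when DEBUG is False):
#   import logging
#   logger = logging.getLogger('django.request')
#   logger.error('Something went wrong while handling a request')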
| bsd-3-clause | -9,091,811,255,458,302,000 | 34.44586 | 140 | 0.679245 | false |
F483/btctxstore | tests/api/validate_wallet.py | 2 | 2131 | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2015 Fabian Barkhau <[email protected]>
# License: MIT (see LICENSE file)
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from btctxstore import exceptions
from btctxstore import BtcTxStore
S_HWIF = "tprv8ZgxMBicQKsPeFZuyoMy197pX8iJeTasLVYEeb68cj489fmCz7kxP8vAfJp3rWebDT3nLvDsBaz3CqnUfdhr1wbjmhUNwK5Hf7dN2Er9btD"
B_HWIF = b"tprv8ZgxMBicQKsPeFZuyoMy197pX8iJeTasLVYEeb68cj489fmCz7kxP8vAfJp3rWebDT3nLvDsBaz3CqnUfdhr1wbjmhUNwK5Hf7dN2Er9btD"
U_HWIF = u"tprv8ZgxMBicQKsPeFZuyoMy197pX8iJeTasLVYEeb68cj489fmCz7kxP8vAfJp3rWebDT3nLvDsBaz3CqnUfdhr1wbjmhUNwK5Hf7dN2Er9btD"
class TestValidateWallet(unittest.TestCase):
    def setUp(self):
        self.testnet_api = BtcTxStore(dryrun=True, testnet=True)
        self.mainnet_api = BtcTxStore(dryrun=True, testnet=False)
        self.testnet_wallet = self.testnet_api.create_wallet()
        self.mainnet_wallet = self.mainnet_api.create_wallet()
        self.testnet_key = self.testnet_api.get_key(self.testnet_wallet)
        self.mainnet_key = self.mainnet_api.get_key(self.mainnet_wallet)

    def test_checks_networks(self):
        self.assertTrue(self.testnet_api.validate_wallet(self.testnet_wallet))
        self.assertTrue(self.mainnet_api.validate_wallet(self.mainnet_wallet))
        self.assertFalse(self.testnet_api.validate_wallet(self.mainnet_wallet))
        self.assertFalse(self.mainnet_api.validate_wallet(self.testnet_wallet))

    def test_doesnt_validate_keys(self):
        self.assertFalse(self.testnet_api.validate_wallet(self.testnet_key))
        self.assertFalse(self.mainnet_api.validate_wallet(self.testnet_key))
        self.assertFalse(self.testnet_api.validate_wallet(self.mainnet_key))
        self.assertFalse(self.mainnet_api.validate_wallet(self.mainnet_key))

    def test_correct_types(self):
        self.assertTrue(self.testnet_api.validate_wallet(S_HWIF))
        self.assertTrue(self.testnet_api.validate_wallet(B_HWIF))
        self.assertTrue(self.testnet_api.validate_wallet(U_HWIF))


if __name__ == '__main__':
    unittest.main()
| mit | -3,874,034,809,327,010,000 | 43.395833 | 123 | 0.758799 | false |
willsirius/DualTreeRRTStartMotionPlanning | pythonVision2/userdefined.py | 1 | 13777 | import time
import openravepy
import sys
import numpy as np
from numpy import sin,cos
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
# import random
import transformationFunction as tf
import kdtree
import scipy.spatial as spatial
import cgkit.all as cg
# weight of quaternion in distance
QWEIGHT = 0.1
def S2CQ(s):
    # return the configuration part of a state point
    # s = [x,v,Q,W]
    return s[0:3]+s[6:10]
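# Example (illustrative values only): for a state s = [x(3), v(3), Q(4), W(3)],
# S2CQ keeps only the position and the attitude quaternion:
#   s = [1,2,3, 0,0,0, 1,0,0,0, 0,0,0]
#   S2CQ(s) -> [1, 2, 3, 1, 0, 0, 0]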
def nnNodeIndexPathSQ(s,path,startIndex):
    # return the nearest node index in path of state point s; indices smaller than startIndex won't count
    minL = 1000
    for i in range(startIndex,len(path)):
        dist = distCSQ(s,path[i])
        if minL > dist:
            minIndex = i
            minL = dist
    return minIndex

def sampleFSC(s,c):
    # sample a force vector at state s while setting c as the goal
    # error is the position error with respect to the goal

    # stdev = 1
    # q = s[6:10]
    # R = Q2R(q)
    # zw = np.matmul(R,[0,0,1])
    # zproj = zw[2]
    # average = 4.5325/zproj
    # # for the control part
    # R = Q2R(s[6:10])
    # X = Q2R(s[0:3])
    # V = np.array(s[3:6])
    # O = np.array(s[10:13])
    # dR = Q2R(c[3:7])
    # dX = np.array(c[0:3])
    # dV = np.array([0,0,0])
    # dO = dV
    # eR = vee(0.5*(np.matmul(dR.T,R)-np.matmul(R.T,dR)))
    # eO = O - np.matmul(np.matmul(R.T,dR),dO)

    stdev = 0.6
    R = Q2R(s[6:10])
    X = s[0:3]
    V = np.array(s[3:6])
    O = np.array(s[10:13])
    dR = Q2R(c[3:7])
    dX = np.array(c[0:3])
    dV = np.array([0,0,0])
    dO = dV
    zw = np.matmul(R,[0,0,1])
    zproj = zw[2]
    average = 4.5325/zproj
    eX = dX - X
    eXb = np.matmul(R.T,eX) # error on X in body frame
    eVb = np.matmul(R.T,dV - V)
    kz = 1
    kx = 0.5
    ky = 0.5
    kv = 0.3
    eXb = eXb + eVb*kv
    eXx = eXb[0]
    eXy = eXb[1]
    eXz = eXb[2]
    average = average + eXz*kz
    f1 = np.random.normal(average + 0.5*kx*eXx, stdev)
    f2 = np.random.normal(average + 0.5*ky*eXy, stdev)
    f3 = np.random.normal(average - 0.5*kx*eXx, stdev)
    f4 = np.random.normal(average - 0.5*ky*eXy, stdev)
    f = [f1,f2,f3,f4]
    return f

def distCSQ(s,c,w = QWEIGHT):
    # return the distance between a state point and a configuration point
    # using the quaternion
    # s = [x,v,Q,W]
    return distXQ(S2CQ(s),c,w)

def sampleFS(s):
    # sample a force vector
    # based on the current state, trying to balance the copter
    # avf = 1.85*9.8/4
    stdev = 0.5
    q = s[6:10]
    R = Q2R(q)
    zw = np.matmul(R,[0,0,1])
    zproj = zw[2]
    average = 4.5325/(0.5*zproj+0.5)
    f1 = abs(np.random.normal(average, stdev))
    f2 = abs(np.random.normal(average, stdev))
    f3 = abs(np.random.normal(average, stdev))
    f4 = abs(np.random.normal(average, stdev))
    f = [f1,f2,f3,f4]
    return f

def sampleF():
    # sample a force vector
    stdev = 1
    average = 5
    f1 = abs(np.random.normal(average, stdev))
    f2 = abs(np.random.normal(average, stdev))
    f3 = abs(np.random.normal(average, stdev))
    f4 = abs(np.random.normal(average, stdev))
    f = [f1,f2,f3,f4]
    return f

def distXQ(a,b, w = QWEIGHT):
    # return the distance between two configurations
    ax = np.array(a[0:3])
    aq = np.array(a[3:7])
    bx = np.array(b[0:3])
    bq = np.array(b[3:7])
    return np.linalg.norm(ax - bx) + w* (1 - np.abs(np.dot(aq,bq)))
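# Worked example (illustrative): two configurations sharing the same attitude
# differ only by their Euclidean part, so with w = 0.1
#   distXQ([0,0,0, 1,0,0,0], [3,4,0, 1,0,0,0]) == 5.0
# because |dot(aq,bq)| = 1 makes the quaternion term vanish.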
def nnNodeCQ(tree,node):
    # tree is a dictionary
    # node is a list
    # configuration space
    # using a quaternion for orientation
    min = 10000
    for i in tree:
        iToNode = distXQ(node,i)
        if iToNode < min:
            min = iToNode
            minIndex = i
    return minIndex

def xyzt(start,end,t):
    # return an interpolation from start to end at t
    return list((np.array(end)-np.array(start))*t + np.array(start))

def stepXQ(start,end,n):
    # return a configuration sequence in the form of [X,Q]
    # n >= 2
    if n == 2:
        return [start,end]
    qs = cg.quat(start[3:7])
    qe = cg.quat(end[3:7])
    xyzs = start[0:3]
    xyze = end[0:3]
    nodes = []
    for i in range(0,n):
        t = float(i)/(n-1)
        # print t
        qt = cg.slerp(t, qs, qe, shortest=True)
        nodes.append(list(xyzt(xyzs,xyze,t)+[qt.w,qt.x,qt.y,qt.z]))
    return nodes

def stepNodesQ(start,end,step):
    # return a list of nodes from start to end, with a specific step
    # each node is in the form of [X,Q]
    # the returned path excludes the start
    l = distXQ(start,end)
    if l <= step:
        return [end]
    else:
        n = int(np.ceil(l/step)) + 1
        nodes = stepXQ(start , end , n)
        del nodes[0]
        nodes.pop()
        nodes.append(end)
        return nodes

def discretePath(path, step = 0.1):
    # input a path and a max step, return a discretized path with maximum step
    newPath = [path[0]]
    for i in range(0,len(path)-1):
        NodeS = path[i]
        NodeE = path[i+1]
        seg = stepNodesQ(NodeS,NodeE,step)
        newPath = newPath + seg
    return newPath

def step1NodeQ(start,end,step):
    # return a single node stepped from start toward end, with a specific step
    l = distXQ(start,end)
    if l <= step:
        return end
    else:
        t = step/l
        qs = cg.quat(start[3:7])
        qe = cg.quat(end[3:7])
        qt = cg.slerp(t, qs, qe, shortest=True)
        return list(xyzt(start[0:3],end[0:3],t)+[qt.w,qt.x,qt.y,qt.z])

def getpath(tree,goal):
    # get the path from a RRT tree
    # tree is a dictionary
    # path and goal are lists
    path = [goal]
    while 1:
        if tree[tuple(path[0])] == tuple(path[0]):
            break
        path = [list(tree[tuple(path[0])])]+path
    return path
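# Example (toy tree, illustrative only): the tree maps each node to its parent
# and the root maps to itself, so
#   tree = {(0,0): (0,0), (1,0): (0,0), (2,0): (1,0)}
#   getpath(tree, [2,0]) -> [[0, 0], [1, 0], [2, 0]]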
def nodesDist(x,y):
    return np.linalg.norm(np.asarray(x)-np.asarray(y))

def stepNodes(start,end,step):
    # return a list of nodes from start to end, with a specific step
    l = nodesDist(start,end)
    if l <= step:
        return [end]
    else:
        n = int(np.ceil(l/step))
        delta = (np.asarray(end)-np.asarray(start))/l*step
        nodes = []
        for i in range(0,n-1):
            nodes.append(list(np.asarray(start)+delta*(i+1)))
        nodes.append(end)
        return nodes

def step1Node(start,end,step):
    # return a node steered from start toward end
    l = nodesDist(start,end)
    if l <= step:
        return end
    else:
        return list(np.asarray(start)+(np.asarray(end)-np.asarray(start))/l*step)

def plotHist(x):
    # the histogram of the data
    n, bins, patches = plt.hist(x, 50, normed=1, facecolor='green', alpha=0.75)
    plt.xlabel('Smarts')
    plt.ylabel('Probability')
    plt.show()

def limitTo(a,lower,upper):
    if a <= lower:
        return lower
    if a >= upper:
        return upper
    return a

# sample an angle from
# [-4.5,3.5,-2.2,2.2,0.21,1.54]
def sampleCE(workspaceBound = [-4.5,4.5,-2.2,2.2,0.21,1.54]):
    x = np.random.uniform(workspaceBound[0],workspaceBound[1])
    y = np.random.uniform(workspaceBound[2],workspaceBound[3])
    z = np.random.uniform(workspaceBound[4],workspaceBound[5])
    q1 = np.random.uniform(0,2*np.pi)
    q3 = np.random.uniform(0,2*np.pi)
    while 1:
        q2 = np.abs(np.random.normal(0,np.pi/4))
        if q2 <= np.pi/2:
            break
    return [x,y,z,q1,q2,q3]

def sampleCQ(workspaceBound = [-4.5,4.5,-2.2,2.2,0.21,1.54]):
    x = np.random.uniform(workspaceBound[0],workspaceBound[1])
    y = np.random.uniform(workspaceBound[2],workspaceBound[3])
    z = np.random.uniform(workspaceBound[4],workspaceBound[5])
    q1 = np.random.uniform(0,2*np.pi)
    q3 = np.random.uniform(0,2*np.pi)
    # q3 = 0 #np.random.uniform(0,2*np.pi)
    # while 1:
    #     q2 = np.abs(np.random.normal(0,np.pi/2))
    #     if q2 <= np.pi/2:
    #         break
    q2 = np.random.uniform(0,0.5*np.pi)
    return [x,y,z] + list(tf.quaternion_from_euler(q1,q2,q3,'rzxz'))

def E2Q(x):
    return x[0:3] + list(tf.quaternion_from_euler(x[3],x[4],x[5],'rzxz'))

def Q2R(Q):
    # convert a quaternion to a rotation matrix
    # input must be a unit quaternion
    qw = Q[0]
    qx = Q[1]
    qy = Q[2]
    qz = Q[3]
    R = np.array([[1 - 2*qy**2 - 2*qz**2, 2*qx*qy - 2*qz*qw, 2*qx*qz + 2*qy*qw],
                  [2*qx*qy + 2*qz*qw, 1 - 2*qx**2 - 2*qz**2, 2*qy*qz - 2*qx*qw],
                  [2*qx*qz - 2*qy*qw, 2*qy*qz + 2*qx*qw, 1 - 2*qx**2 - 2*qy**2]])
    return R
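# Sanity-check examples (illustrative): the identity quaternion maps to the
# identity rotation, and [0,0,0,1] is a 180-degree rotation about z:
#   Q2R([1,0,0,0]) -> 3x3 identity matrix
#   Q2R([0,0,0,1]) -> diag(-1,-1,1)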
def genCQ(x,y,z,q1,q2,q3):
    # generate a quaternion by parameters
    sq32 = sin(q3/2)
    sq1 = sin(q1)
    # print sq32
    # print sq1
    return [x,y,z,cos(q3/2),sq32*sq1*cos(q2),sq32*sq1*sin(q2),sq32*cos(q1)]

def hat(v):
    # hat map of a vector
    # input a numpy array or list, output a numpy array
    return np.array([[0,-v[2],v[1]],[v[2],0,-v[0]],[-v[1],v[0],0]])

def vee(A):
    # inverse of the hat map
    return np.array([A[2,1],A[0,2],A[1,0]])
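# Example (illustrative): hat and vee are inverse maps between R^3 and
# skew-symmetric matrices, and np.matmul(hat(v), w) equals the cross product v x w:
#   hat([1,2,3]) -> [[ 0,-3, 2],
#                    [ 3, 0,-1],
#                    [-2, 1, 0]]
#   vee(hat([1,2,3])) -> array([1, 2, 3])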
def cross(a, b):
    c = np.array([[a[1]*b[2] - a[2]*b[1]],
                  [a[2]*b[0] - a[0]*b[2]],
                  [a[0]*b[1] - a[1]*b[0]]])
    return c

def updateState(s1,u,ts):
    # update state s1 to s2 with control input u and time step ts
    # s uses a position vector and a quaternion for the representation
    # s = [x,v,Q,W]: position, velocity, attitude quaternion and angular velocity
    # the quaternion is translated to a rotation matrix for computation,
    # then the rotation matrix is converted back to a quaternion before return
    # input and output are both lists

    # u rotation speed of each motor
    # a acceleration in inertial frame
    # x position in inertial frame
    # v velocity in inertial frame
    # Q rotation quaternion of the body in the inertial frame
    # W angular velocity in the body frame
    # M moment vector in the body fixed frame
    # m total mass of the drone
    # Rd the derivative of the rotation matrix
    # J inertia matrix
    # ctf constant to convert force to torque: f*ctf = t
    # MV moment vector f,mx,my,mz

    J = np.array([[0.04,0,0],
                  [0,0.04,0],
                  [0,0,0.07]])
    Jinv = np.array([[ 25. , 0. , 0. ],
                     [ 0. , 25. , 0. ],
                     [ 0. , 0. , 14.28571429]])
    m = 1.85
    d = 0.2
    ctf = 0.008
    g = 9.8
    e3 = np.array([0,0,1])

    MV = np.matmul(np.array([[1,1,1,1],[0,-d,0,d],[d,0,-d,0],[-ctf,ctf,-ctf,ctf]]),np.array([u[0],u[1],u[2],u[3]]))
    f = MV[0]
    M = MV[[1,2,3]]

    x1 = np.array(s1[0:3])
    v1 = np.array(s1[3:6])
    Q1 = np.array(s1[6:10])
    W1 = np.array(s1[10:13])

    R1 = Q2R(Q1)
    R1d = np.matmul(R1,hat(W1))
    a = - g*e3+(f*np.matmul(R1,e3))/m
    W1d = np.matmul( Jinv, M - np.cross(W1,np.matmul(J,W1)))

    x2 = x1 + ts*v1
    v2 = v1 + ts*a
    R2 = R1 + ts*R1d
    W2 = W1 + ts*W1d

    R2t = np.identity(4)
    R2t[0:3,0:3] = R2
    Q2 = tf.quaternion_from_matrix(R2t)

    s2 = list(x2)+list(v2)+list(Q2)+list(W2)
    return s2
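# Usage sketch (hover-like test, illustrative values only): starting from rest
# at the origin and applying roughly the per-rotor hover thrust m*g/4 ~ 4.5325
# should leave the state nearly unchanged over one 0.02 s step:
#   s = [0,0,0, 0,0,0, 1,0,0,0, 0,0,0]
#   s = updateState(s, [4.5325]*4, 0.02)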
# start = [1,2,3,1,0,0,0]
# end = [3,2,5,0,1,0,0]
# # stepNodesQ
# for i in stepNodesQ(start,end,0.1):
# print i#,distXQ(i,start),distXQ(i,end)
# a = np.array([1,2,3])
# print np.dot(a,a)
# print "test update state"
# s2 = [0,0,0,0,0,0,1,0,0,0,0,0,0]
# # s1 = [1,1,1,1,0,0,0,0.2,0.2,0.2,0.1,0.1,-0.1]
# u = [0,0,0,0]
# ts = 0.02
# t = range(0,100)
# for tt in t:
# s2 = updateState(s2,u,ts)
# x1 = np.array(s2[0:3])
# v1 = np.array(s2[3:6])
# Q1 = np.array(s2[6:10])
# W1 = np.array(s2[10:13])
# E1 = tf.euler_from_quaternion(Q1)
# print x1
# print v1
# print Q1
# print W1
# axarr[0, 0].plot(x, y)
# axarr[0, 0].set_title('Axis [0,0]')
# axarr[0, 1].scatter(x, y)
# axarr[0, 1].set_title('Axis [0,1]')
# axarr[1, 0].plot(x, y ** 2)
# axarr[1, 0].set_title('Axis [1,0]')
# axarr[1, 1].scatter(x, y ** 2)
# axarr[1, 1].set_title('Axis [1,1]')
# # Fine-tune figure; hide x ticks for top plots and y ticks for right plots
# plt.setp([a.get_xticklabels() for a in axarr[0, :]], visible=False)
# plt.setp([a.get_yticklabels() for a in axarr[:, 1]], visible=False)
# q = [1,0,0,0]
# q0 = tf.random_quaternion()
# r0 = Q2R(q0)
# print hat([1,2,3])
# print tf.euler_from_matrix(r0)
# print tf.euler_from_quaternion(q0)
# print hat([1,2,3])
# print [1,2,3,4][3]
# v = [1,2,3]
# np.array([0,-v[2],v[1]],[v[2],0,-v[0]],[-v[1],v[0],0])
# print sampleRotation()
# # print np.random.normal(0, 3.14, 1)
# eM = tf.euler_matrix(0,0,1.57)
# print eM
# print np.random.uniform(0,3)
# # print 1
# print tf.random_rotation_matrix()
# print np.dot(tf.random_quaternion(),tf.random_quaternion())
# print np.matmul(tf.random_rotation_matrix(),tf.random_rotation_matrix())
# start = tf.random_quaternion();
# print start
# print tuple(start)
# a = {tuple(start):tuple(start)}
# print a
# print a[tuple(start)]
# x = [sampleC()];
# KDtree = kdtree.create(x)
# print x
# for i in range(0,200):
# # x.append(sampleC()[5])
# newnode =sampleC()
# x.append(newnode)
# KDtree.add(newnode)
# # print x
# kdtree.visualize(KDtree)
# node = sampleC()
# print node
# a = KDtree.search_nn(node)[0].data
# print a
# aa = 1000
# for i in x:
# # print "this is i"
# # print np.asarray(i)
# # print type(np.asarray(i))
# # print np.linalg.norm(np.asarray(i),np.asarray(i))
# aa = min(aa,np.linalg.norm(np.asarray(i)-np.asarray(node)))
# print aa
# print np.linalg.norm(np.asarray(a)-np.asarray(node))
# print nodesDist(1,3)
# print nodesDist([1,2,3],[4,5,6])
# print np.power(nodesDist([[2,3,4],[2,3,4]],[[1,2,3],[1,2,3]]),2)
# print np.asarray([[2,3,4],[2,3,4]])
# print np.floor(3.4)
# yy = [];
# yy.append([1,2,3])
# yy.append([1,2,5])
# print yy
# print ""
# print step1Node([30,40],[0,0.1],5)
# a = {(2,3):(1,2),(1,2):(1,2),(3,4):(1,2),(5,6):(3,4),(9,8):(3,4)};
# print a
# print getpath(a,[5,6])
# print ""
# points = np.array([ (3, 4), (1, 2),(4, 5),(6,7),(2,5),(2,4)])
# points = [[1,2],[4,5],[5,2]]
# point_tree = spatial.KDTree(points)
# This finds the index of all points within distance 1 of [1.5,2.5].
# print(point_tree.query_ball_point([1.5, 2.5], 2))
# print point_tree.query([1.5, 2.5])
# print point_tree.data[point_tree.query([1.5, 2.5])[1]]
# [0]
# # This gives the point in the KDTree which is within 1 unit of [1.5, 2.5]
# print(point_tree.data[point_tree.query_ball_point([1.5, 2.5], 1)])
# # [[1 2]]
# # More than one point is within 3 units of [1.5, 1.6].
# print(point_tree.data[point_tree.query_ball_point([1.5, 1.6], 3)])
# # [[1 2]
# # [3 4]]
# x = []
# for i in range(0,1000):
# while 1:
# q1 = np.random.normal(np.pi/4,np.pi/8)
# if np.abs(q1-np.pi/4) <= np.pi/4:
# break
# x.append(q1)
# plotHist(x)
# startconfig = [ 4.0,-1.5 ,0.2 ,1 ,0.0, 0.0, 0.0 ]
# print E2Q(startconfig) | mit | -7,179,047,926,765,260,000 | 20.93949 | 112 | 0.605865 | false |
Sendinel/Sendinel | sendinel/notifications/forms.py | 1 | 1461 | from django.forms import CharField, ModelChoiceField, DateTimeField, Form
from django.utils.translation import ugettext as _
from sendinel.backend.authhelper import format_and_validate_phonenumber
from sendinel.backend.models import Sendable, \
WayOfCommunication, \
get_enabled_wocs
class NotificationValidationForm(Form):
    phone_number = CharField(
        validators = [format_and_validate_phonenumber],
        error_messages={'required': _('Please enter a phone number')})
    way_of_communication = ModelChoiceField(
        queryset = get_enabled_wocs(),
        error_messages={'required':
            _('Please choose a way of communication')})
    date = DateTimeField(error_messages={
        'required': _('Please choose a date'),
        'invalid': _('Please choose a date')})


class NotificationValidationFormBluetooth(Form):
    way_of_communication = ModelChoiceField(
        queryset = get_enabled_wocs(),
        error_messages={'required':
            _('Please choose a way of communication')})
    date = DateTimeField(error_messages={
        'required': _('Please choose a date'),
        'invalid': _('Please choose a date')})
| mit | 6,670,814,485,201,891,000 | 46.129032 | 80 | 0.543463 | false |
DanielWaterworth/AODBM | python_tests/simple_test.py | 1 | 1871 | '''
Copyright (C) 2011 aodbm authors,
This file is part of aodbm.
aodbm is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
aodbm is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import unittest, aodbm
class TestSimple(unittest.TestCase):
    def setUp(self):
        self.db = aodbm.AODBM('testdb')

    def test_simple(self):
        # a simple test with one record
        ver = aodbm.Version(self.db, 0)
        self.assertRaises(KeyError, ver.__getitem__, 'hello')
        self.assertFalse(ver.has('test'))
        ver['test'] = 'hello'
        self.assertEqual(ver['test'], 'hello')
        self.assertTrue(ver.has('test'))
        del ver['test']
        self.assertRaises(KeyError, ver.__getitem__, 'test')
        self.assertFalse(ver.has('test'))
        del ver['test']
        self.assertRaises(KeyError, ver.__getitem__, 'test')
        self.assertFalse(ver.has('test'))

    def test_insert_empty(self):
        ver = aodbm.Version(self.db, 0)
        ver['hello'] = 'world'
        # create an empty root node
        del ver['hello']
        ver['hello'] = 'world'
        self.assertTrue(ver.has('hello'))
        self.assertEqual(ver['hello'], 'world')
tests = [TestSimple]
tests = map(unittest.TestLoader().loadTestsFromTestCase, tests)
tests = unittest.TestSuite(tests)
| gpl-3.0 | 526,834,158,543,468,200 | 34.980769 | 79 | 0.645644 | false |
phenoxim/nova | nova/tests/functional/test_server_group.py | 1 | 36573 | # Copyright 2015 Ericsson AB
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import mock
from oslo_config import cfg
from nova import context
from nova import db
from nova.db.sqlalchemy import api as db_api
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_network
from nova.tests.unit import policy_fixture
from nova.virt import fake
import nova.scheduler.utils
import nova.servicegroup
import nova.tests.unit.image.fake
# An alternate project id
PROJECT_ID_ALT = "616c6c796f7572626173656172656f73"
CONF = cfg.CONF
class ServerGroupTestBase(test.TestCase,
integrated_helpers.InstanceHelperMixin):
REQUIRES_LOCKING = True
api_major_version = 'v2.1'
microversion = None
_enabled_filters = (CONF.filter_scheduler.enabled_filters
+ ['ServerGroupAntiAffinityFilter',
'ServerGroupAffinityFilter'])
# Override servicegroup parameters to make the tests run faster
_service_down_time = 10
_report_interval = 1
anti_affinity = {'name': 'fake-name-1', 'policies': ['anti-affinity']}
affinity = {'name': 'fake-name-2', 'policies': ['affinity']}
def _get_weight_classes(self):
return []
def setUp(self):
super(ServerGroupTestBase, self).setUp()
self.flags(enabled_filters=self._enabled_filters,
group='filter_scheduler')
# NOTE(sbauza): Don't verify VCPUS and disks given the current nodes.
self.flags(cpu_allocation_ratio=9999.0)
self.flags(disk_allocation_ratio=9999.0)
self.flags(weight_classes=self._get_weight_classes(),
group='filter_scheduler')
self.flags(service_down_time=self._service_down_time)
self.flags(report_interval=self._report_interval)
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.api = api_fixture.api
self.api.microversion = self.microversion
self.admin_api = api_fixture.admin_api
self.admin_api.microversion = self.microversion
# the image fake backend needed for image discovery
nova.tests.unit.image.fake.stub_out_image_service(self)
self.start_service('conductor')
self.start_service('scheduler')
self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
def _boot_a_server_to_group(self, group,
expected_status='ACTIVE', flavor=None):
server = self._build_minimal_create_server_request(self.api,
'some-server')
if flavor:
server['flavorRef'] = ('http://fake.server/%s'
% flavor['id'])
post = {'server': server,
'os:scheduler_hints': {'group': group['id']}}
created_server = self.api.post_server(post)
self.assertTrue(created_server['id'])
# Wait for it to finish being created
found_server = self._wait_for_state_change(
self.admin_api, created_server, expected_status)
return found_server
class ServerGroupFakeDriver(fake.SmallFakeDriver):
"""A specific fake driver for our tests.
Here, we only want to be RAM-bound.
"""
vcpus = 1000
memory_mb = 8192
local_gb = 100000
# A fake way to change the FakeDriver given we don't have a possibility yet to
# modify the resources for the FakeDriver
def _fake_load_compute_driver(virtapi, compute_driver=None):
return ServerGroupFakeDriver(virtapi)
class ServerGroupTestV21(ServerGroupTestBase):
def setUp(self):
super(ServerGroupTestV21, self).setUp()
# TODO(sbauza): Remove that once there is a way to have a custom
# FakeDriver supporting different resources. Note that we can't also
# simply change the config option for choosing our custom fake driver
# as the mocked method only accepts to load drivers in the nova.virt
# tree.
self.stub_out('nova.virt.driver.load_compute_driver',
_fake_load_compute_driver)
fake.set_nodes(['compute'])
self.compute = self.start_service('compute', host='compute')
# NOTE(gibi): start a second compute host to be able to test affinity
# NOTE(sbauza): Make sure the FakeDriver returns a different nodename
# for the second compute node.
fake.set_nodes(['host2'])
self.addCleanup(fake.restore_nodes)
self.compute2 = self.start_service('compute', host='host2')
fake_network.set_stub_network_methods(self)
def test_get_no_groups(self):
groups = self.api.get_server_groups()
self.assertEqual([], groups)
def test_create_and_delete_groups(self):
groups = [self.anti_affinity,
self.affinity]
created_groups = []
for group in groups:
created_group = self.api.post_server_groups(group)
created_groups.append(created_group)
self.assertEqual(group['name'], created_group['name'])
self.assertEqual(group['policies'], created_group['policies'])
self.assertEqual([], created_group['members'])
self.assertEqual({}, created_group['metadata'])
self.assertIn('id', created_group)
group_details = self.api.get_server_group(created_group['id'])
self.assertEqual(created_group, group_details)
existing_groups = self.api.get_server_groups()
self.assertIn(created_group, existing_groups)
existing_groups = self.api.get_server_groups()
self.assertEqual(len(groups), len(existing_groups))
for group in created_groups:
self.api.delete_server_group(group['id'])
existing_groups = self.api.get_server_groups()
self.assertNotIn(group, existing_groups)
def test_create_wrong_policy(self):
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_groups,
{'name': 'fake-name-1',
'policies': ['wrong-policy']})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Invalid input', ex.response.text)
self.assertIn('wrong-policy', ex.response.text)
def test_get_groups_all_projects(self):
# This test requires APIs using two projects.
# Create an API using project 'openstack1'.
# This is a non-admin API.
#
# NOTE(sdague): this is actually very much *not* how this
# fixture should be used. This actually spawns a whole
# additional API server. Should be addressed in the future.
api_openstack1 = self.useFixture(nova_fixtures.OSAPIFixture(
api_version=self.api_major_version,
project_id=PROJECT_ID_ALT)).api
api_openstack1.microversion = self.microversion
# Create a server group in project 'openstack'
# Project 'openstack' is used by self.api
group1 = self.anti_affinity
openstack_group = self.api.post_server_groups(group1)
# Create a server group in project 'openstack1'
group2 = self.affinity
openstack1_group = api_openstack1.post_server_groups(group2)
# The admin should be able to get server groups in all projects.
all_projects_admin = self.admin_api.get_server_groups(
all_projects=True)
self.assertIn(openstack_group, all_projects_admin)
self.assertIn(openstack1_group, all_projects_admin)
# The non-admin should only be able to get server groups
# in his project.
# The all_projects parameter is ignored for non-admin clients.
all_projects_non_admin = api_openstack1.get_server_groups(
all_projects=True)
self.assertNotIn(openstack_group, all_projects_non_admin)
self.assertIn(openstack1_group, all_projects_non_admin)
def test_create_duplicated_policy(self):
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_groups,
{"name": "fake-name-1",
"policies": ["affinity", "affinity"]})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Invalid input', ex.response.text)
def test_create_multiple_policies(self):
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_groups,
{"name": "fake-name-1",
"policies": ["anti-affinity", "affinity"]})
self.assertEqual(400, ex.response.status_code)
def _boot_servers_to_group(self, group, flavor=None):
servers = []
for _ in range(0, 2):
server = self._boot_a_server_to_group(group,
flavor=flavor)
servers.append(server)
return servers
def test_boot_servers_with_affinity(self):
created_group = self.api.post_server_groups(self.affinity)
servers = self._boot_servers_to_group(created_group)
members = self.api.get_server_group(created_group['id'])['members']
host = servers[0]['OS-EXT-SRV-ATTR:host']
for server in servers:
self.assertIn(server['id'], members)
self.assertEqual(host, server['OS-EXT-SRV-ATTR:host'])
def test_boot_servers_with_affinity_overquota(self):
# Tests that we check server group member quotas and cleanup created
# resources when we fail with OverQuota.
self.flags(server_group_members=1, group='quota')
# make sure we start with 0 servers
servers = self.api.get_servers(detail=False)
self.assertEqual(0, len(servers))
created_group = self.api.post_server_groups(self.affinity)
ex = self.assertRaises(client.OpenStackApiException,
self._boot_servers_to_group,
created_group)
self.assertEqual(403, ex.response.status_code)
# _boot_servers_to_group creates 2 instances in the group in order, not
# multiple servers in a single request. Since our quota is 1, the first
# server create would pass, the second should fail, and we should be
# left with 1 server and it's 1 block device mapping.
servers = self.api.get_servers(detail=False)
self.assertEqual(1, len(servers))
ctxt = context.get_admin_context()
servers = db.instance_get_all(ctxt)
self.assertEqual(1, len(servers))
ctxt_mgr = db_api.get_context_manager(ctxt)
with ctxt_mgr.reader.using(ctxt):
bdms = db_api._block_device_mapping_get_query(ctxt).all()
self.assertEqual(1, len(bdms))
self.assertEqual(servers[0]['uuid'], bdms[0]['instance_uuid'])
def test_boot_servers_with_affinity_no_valid_host(self):
created_group = self.api.post_server_groups(self.affinity)
# Using big enough flavor to use up the resources on the host
flavor = self.api.get_flavors()[2]
self._boot_servers_to_group(created_group, flavor=flavor)
# The third server cannot be booted as there is not enough resource
# on the host where the first two server was booted
failed_server = self._boot_a_server_to_group(created_group,
flavor=flavor,
expected_status='ERROR')
self.assertEqual('No valid host was found. '
'There are not enough hosts available.',
failed_server['fault']['message'])
def test_boot_servers_with_anti_affinity(self):
created_group = self.api.post_server_groups(self.anti_affinity)
servers = self._boot_servers_to_group(created_group)
members = self.api.get_server_group(created_group['id'])['members']
self.assertNotEqual(servers[0]['OS-EXT-SRV-ATTR:host'],
servers[1]['OS-EXT-SRV-ATTR:host'])
for server in servers:
self.assertIn(server['id'], members)
def test_boot_server_with_anti_affinity_no_valid_host(self):
created_group = self.api.post_server_groups(self.anti_affinity)
self._boot_servers_to_group(created_group)
# We have 2 computes so the third server won't fit into the same group
failed_server = self._boot_a_server_to_group(created_group,
expected_status='ERROR')
self.assertEqual('No valid host was found. '
'There are not enough hosts available.',
failed_server['fault']['message'])
def _rebuild_with_group(self, group):
created_group = self.api.post_server_groups(group)
servers = self._boot_servers_to_group(created_group)
post = {'rebuild': {'imageRef':
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'}}
self.api.post_server_action(servers[1]['id'], post)
rebuilt_server = self._wait_for_state_change(
self.admin_api, servers[1], 'ACTIVE')
self.assertEqual(post['rebuild']['imageRef'],
rebuilt_server.get('image')['id'])
return [servers[0], rebuilt_server]
def test_rebuild_with_affinity(self):
untouched_server, rebuilt_server = self._rebuild_with_group(
self.affinity)
self.assertEqual(untouched_server['OS-EXT-SRV-ATTR:host'],
rebuilt_server['OS-EXT-SRV-ATTR:host'])
def test_rebuild_with_anti_affinity(self):
untouched_server, rebuilt_server = self._rebuild_with_group(
self.anti_affinity)
self.assertNotEqual(untouched_server['OS-EXT-SRV-ATTR:host'],
rebuilt_server['OS-EXT-SRV-ATTR:host'])
def _migrate_with_group_no_valid_host(self, group):
created_group = self.api.post_server_groups(group)
servers = self._boot_servers_to_group(created_group)
post = {'migrate': {}}
ex = self.assertRaises(client.OpenStackApiException,
self.admin_api.post_server_action,
servers[1]['id'], post)
self.assertEqual(400, ex.response.status_code)
self.assertIn('No valid host found for cold migrate', ex.response.text)
def test_migrate_with_group_no_valid_host(self):
for group in [self.affinity, self.anti_affinity]:
self._migrate_with_group_no_valid_host(group)
def test_migrate_with_anti_affinity(self):
# Start additional host to test migration with anti-affinity
fake.set_nodes(['host3'])
self.start_service('compute', host='host3')
created_group = self.api.post_server_groups(self.anti_affinity)
servers = self._boot_servers_to_group(created_group)
post = {'migrate': {}}
self.admin_api.post_server_action(servers[1]['id'], post)
migrated_server = self._wait_for_state_change(
self.admin_api, servers[1], 'VERIFY_RESIZE')
self.assertNotEqual(servers[0]['OS-EXT-SRV-ATTR:host'],
migrated_server['OS-EXT-SRV-ATTR:host'])
def test_resize_to_same_host_with_anti_affinity(self):
self.flags(allow_resize_to_same_host=True)
created_group = self.api.post_server_groups(self.anti_affinity)
servers = self._boot_servers_to_group(created_group,
flavor=self.api.get_flavors()[0])
post = {'resize': {'flavorRef': '2'}}
server1_old_host = servers[1]['OS-EXT-SRV-ATTR:host']
self.admin_api.post_server_action(servers[1]['id'], post)
migrated_server = self._wait_for_state_change(
self.admin_api, servers[1], 'VERIFY_RESIZE')
self.assertEqual(server1_old_host,
migrated_server['OS-EXT-SRV-ATTR:host'])
def _get_compute_service_by_host_name(self, host_name):
host = None
if self.compute.host == host_name:
host = self.compute
elif self.compute2.host == host_name:
host = self.compute2
else:
raise AssertionError('host = %s does not found in '
'existing hosts %s' %
(host_name, str([self.compute.host,
self.compute2.host])))
return host
def test_evacuate_with_anti_affinity(self):
created_group = self.api.post_server_groups(self.anti_affinity)
servers = self._boot_servers_to_group(created_group)
host = self._get_compute_service_by_host_name(
servers[1]['OS-EXT-SRV-ATTR:host'])
host.stop()
# Need to wait service_down_time amount of seconds to ensure
# nova considers the host down
time.sleep(self._service_down_time)
# Start additional host to test evacuation
fake.set_nodes(['host3'])
self.start_service('compute', host='host3')
post = {'evacuate': {'onSharedStorage': False}}
self.admin_api.post_server_action(servers[1]['id'], post)
self._wait_for_migration_status(servers[1], ['done'])
evacuated_server = self._wait_for_state_change(
self.admin_api, servers[1], 'ACTIVE')
# check that the server is evacuated to another host
self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
servers[1]['OS-EXT-SRV-ATTR:host'])
# check that anti-affinity policy is kept during evacuation
self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
servers[0]['OS-EXT-SRV-ATTR:host'])
host.start()
def test_evacuate_with_anti_affinity_no_valid_host(self):
created_group = self.api.post_server_groups(self.anti_affinity)
servers = self._boot_servers_to_group(created_group)
host = self._get_compute_service_by_host_name(
servers[1]['OS-EXT-SRV-ATTR:host'])
host.stop()
# Need to wait service_down_time amount of seconds to ensure
# nova considers the host down
time.sleep(self._service_down_time)
post = {'evacuate': {'onSharedStorage': False}}
self.admin_api.post_server_action(servers[1]['id'], post)
self._wait_for_migration_status(servers[1], ['error'])
server_after_failed_evac = self._wait_for_state_change(
self.admin_api, servers[1], 'ERROR')
# assert that after a failed evac the server active on the same host
# as before
self.assertEqual(server_after_failed_evac['OS-EXT-SRV-ATTR:host'],
servers[1]['OS-EXT-SRV-ATTR:host'])
host.start()
def test_evacuate_with_affinity_no_valid_host(self):
created_group = self.api.post_server_groups(self.affinity)
servers = self._boot_servers_to_group(created_group)
host = self._get_compute_service_by_host_name(
servers[1]['OS-EXT-SRV-ATTR:host'])
host.stop()
# Need to wait service_down_time amount of seconds to ensure
# nova considers the host down
time.sleep(self._service_down_time)
post = {'evacuate': {'onSharedStorage': False}}
self.admin_api.post_server_action(servers[1]['id'], post)
self._wait_for_migration_status(servers[1], ['error'])
server_after_failed_evac = self._wait_for_state_change(
self.admin_api, servers[1], 'ERROR')
# assert that after a failed evac the server active on the same host
# as before
self.assertEqual(server_after_failed_evac['OS-EXT-SRV-ATTR:host'],
servers[1]['OS-EXT-SRV-ATTR:host'])
host.start()
def test_soft_affinity_not_supported(self):
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_groups,
{'name': 'fake-name-1',
'policies': ['soft-affinity']})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Invalid input', ex.response.text)
self.assertIn('soft-affinity', ex.response.text)
class ServerGroupAffinityConfTest(ServerGroupTestBase):
api_major_version = 'v2.1'
# Load only anti-affinity filter so affinity will be missing
_enabled_filters = ['ServerGroupAntiAffinityFilter']
@mock.patch('nova.scheduler.utils._SUPPORTS_AFFINITY', None)
def test_affinity_no_filter(self):
created_group = self.api.post_server_groups(self.affinity)
failed_server = self._boot_a_server_to_group(created_group,
expected_status='ERROR')
self.assertEqual('ServerGroup policy is not supported: '
'ServerGroupAffinityFilter not configured',
failed_server['fault']['message'])
self.assertEqual(400, failed_server['fault']['code'])
class ServerGroupAntiAffinityConfTest(ServerGroupTestBase):
api_major_version = 'v2.1'
# Load only affinity filter so anti-affinity will be missing
_enabled_filters = ['ServerGroupAffinityFilter']
@mock.patch('nova.scheduler.utils._SUPPORTS_ANTI_AFFINITY', None)
def test_anti_affinity_no_filter(self):
created_group = self.api.post_server_groups(self.anti_affinity)
failed_server = self._boot_a_server_to_group(created_group,
expected_status='ERROR')
self.assertEqual('ServerGroup policy is not supported: '
'ServerGroupAntiAffinityFilter not configured',
failed_server['fault']['message'])
self.assertEqual(400, failed_server['fault']['code'])
class ServerGroupSoftAffinityConfTest(ServerGroupTestBase):
api_major_version = 'v2.1'
microversion = '2.15'
soft_affinity = {'name': 'fake-name-4',
'policies': ['soft-affinity']}
def _get_weight_classes(self):
# Load only soft-anti-affinity weigher so affinity will be missing
return ['nova.scheduler.weights.affinity.'
'ServerGroupSoftAntiAffinityWeigher']
@mock.patch('nova.scheduler.utils._SUPPORTS_SOFT_AFFINITY', None)
def test_soft_affinity_no_filter(self):
created_group = self.api.post_server_groups(self.soft_affinity)
failed_server = self._boot_a_server_to_group(created_group,
expected_status='ERROR')
self.assertEqual('ServerGroup policy is not supported: '
'ServerGroupSoftAffinityWeigher not configured',
failed_server['fault']['message'])
self.assertEqual(400, failed_server['fault']['code'])
class ServerGroupSoftAntiAffinityConfTest(ServerGroupTestBase):
api_major_version = 'v2.1'
microversion = '2.15'
soft_anti_affinity = {'name': 'fake-name-3',
'policies': ['soft-anti-affinity']}
def _get_weight_classes(self):
# Load only soft affinity filter so anti-affinity will be missing
return ['nova.scheduler.weights.affinity.'
'ServerGroupSoftAffinityWeigher']
@mock.patch('nova.scheduler.utils._SUPPORTS_SOFT_ANTI_AFFINITY', None)
def test_soft_anti_affinity_no_filter(self):
created_group = self.api.post_server_groups(self.soft_anti_affinity)
failed_server = self._boot_a_server_to_group(created_group,
expected_status='ERROR')
self.assertEqual('ServerGroup policy is not supported: '
'ServerGroupSoftAntiAffinityWeigher not configured',
failed_server['fault']['message'])
self.assertEqual(400, failed_server['fault']['code'])
class ServerGroupTestV215(ServerGroupTestV21):
api_major_version = 'v2.1'
microversion = '2.15'
soft_anti_affinity = {'name': 'fake-name-3',
'policies': ['soft-anti-affinity']}
soft_affinity = {'name': 'fake-name-4',
'policies': ['soft-affinity']}
def setUp(self):
super(ServerGroupTestV215, self).setUp()
soft_affinity_patcher = mock.patch(
'nova.scheduler.utils._SUPPORTS_SOFT_AFFINITY')
soft_anti_affinity_patcher = mock.patch(
'nova.scheduler.utils._SUPPORTS_SOFT_ANTI_AFFINITY')
self.addCleanup(soft_affinity_patcher.stop)
self.addCleanup(soft_anti_affinity_patcher.stop)
self.mock_soft_affinity = soft_affinity_patcher.start()
self.mock_soft_anti_affinity = soft_anti_affinity_patcher.start()
self.mock_soft_affinity.return_value = None
self.mock_soft_anti_affinity.return_value = None
def _get_weight_classes(self):
return ['nova.scheduler.weights.affinity.'
'ServerGroupSoftAffinityWeigher',
'nova.scheduler.weights.affinity.'
'ServerGroupSoftAntiAffinityWeigher']
def test_evacuate_with_anti_affinity(self):
created_group = self.api.post_server_groups(self.anti_affinity)
servers = self._boot_servers_to_group(created_group)
host = self._get_compute_service_by_host_name(
servers[1]['OS-EXT-SRV-ATTR:host'])
host.stop()
# Need to wait service_down_time amount of seconds to ensure
# nova considers the host down
time.sleep(self._service_down_time)
# Start additional host to test evacuation
fake.set_nodes(['host3'])
compute3 = self.start_service('compute', host='host3')
post = {'evacuate': {}}
self.admin_api.post_server_action(servers[1]['id'], post)
self._wait_for_migration_status(servers[1], ['done'])
evacuated_server = self._wait_for_state_change(
self.admin_api, servers[1], 'ACTIVE')
# check that the server is evacuated
self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
servers[1]['OS-EXT-SRV-ATTR:host'])
# check that policy is kept
self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
servers[0]['OS-EXT-SRV-ATTR:host'])
compute3.kill()
host.start()
def test_evacuate_with_anti_affinity_no_valid_host(self):
created_group = self.api.post_server_groups(self.anti_affinity)
servers = self._boot_servers_to_group(created_group)
host = self._get_compute_service_by_host_name(
servers[1]['OS-EXT-SRV-ATTR:host'])
host.stop()
# Need to wait service_down_time amount of seconds to ensure
# nova considers the host down
time.sleep(self._service_down_time)
post = {'evacuate': {}}
self.admin_api.post_server_action(servers[1]['id'], post)
self._wait_for_migration_status(servers[1], ['error'])
server_after_failed_evac = self._wait_for_state_change(
self.admin_api, servers[1], 'ERROR')
# assert that after a failed evac the server active on the same host
# as before
self.assertEqual(server_after_failed_evac['OS-EXT-SRV-ATTR:host'],
servers[1]['OS-EXT-SRV-ATTR:host'])
host.start()
def test_evacuate_with_affinity_no_valid_host(self):
created_group = self.api.post_server_groups(self.affinity)
servers = self._boot_servers_to_group(created_group)
host = self._get_compute_service_by_host_name(
servers[1]['OS-EXT-SRV-ATTR:host'])
host.stop()
# Need to wait service_down_time amount of seconds to ensure
# nova considers the host down
time.sleep(self._service_down_time)
post = {'evacuate': {}}
self.admin_api.post_server_action(servers[1]['id'], post)
self._wait_for_migration_status(servers[1], ['error'])
server_after_failed_evac = self._wait_for_state_change(
self.admin_api, servers[1], 'ERROR')
# assert that after a failed evac the server active on the same host
# as before
self.assertEqual(server_after_failed_evac['OS-EXT-SRV-ATTR:host'],
servers[1]['OS-EXT-SRV-ATTR:host'])
host.start()
def test_create_and_delete_groups(self):
groups = [self.anti_affinity,
self.affinity,
self.soft_affinity,
self.soft_anti_affinity]
created_groups = []
for group in groups:
created_group = self.api.post_server_groups(group)
created_groups.append(created_group)
self.assertEqual(group['name'], created_group['name'])
self.assertEqual(group['policies'], created_group['policies'])
self.assertEqual([], created_group['members'])
self.assertEqual({}, created_group['metadata'])
self.assertIn('id', created_group)
group_details = self.api.get_server_group(created_group['id'])
self.assertEqual(created_group, group_details)
existing_groups = self.api.get_server_groups()
self.assertIn(created_group, existing_groups)
existing_groups = self.api.get_server_groups()
self.assertEqual(len(groups), len(existing_groups))
for group in created_groups:
self.api.delete_server_group(group['id'])
existing_groups = self.api.get_server_groups()
self.assertNotIn(group, existing_groups)
def test_boot_servers_with_soft_affinity(self):
created_group = self.api.post_server_groups(self.soft_affinity)
servers = self._boot_servers_to_group(created_group)
members = self.api.get_server_group(created_group['id'])['members']
self.assertEqual(2, len(servers))
self.assertIn(servers[0]['id'], members)
self.assertIn(servers[1]['id'], members)
self.assertEqual(servers[0]['OS-EXT-SRV-ATTR:host'],
servers[1]['OS-EXT-SRV-ATTR:host'])
def test_boot_servers_with_soft_affinity_no_resource_on_first_host(self):
created_group = self.api.post_server_groups(self.soft_affinity)
# Using big enough flavor to use up the resources on the first host
flavor = self.api.get_flavors()[2]
servers = self._boot_servers_to_group(created_group, flavor)
# The third server cannot be booted on the first host as there
# is not enough resource there, but as opposed to the affinity policy
# it will be booted on the other host, which has enough resources.
third_server = self._boot_a_server_to_group(created_group,
flavor=flavor)
members = self.api.get_server_group(created_group['id'])['members']
hosts = []
for server in servers:
hosts.append(server['OS-EXT-SRV-ATTR:host'])
self.assertIn(third_server['id'], members)
self.assertNotIn(third_server['OS-EXT-SRV-ATTR:host'], hosts)
def test_boot_servers_with_soft_anti_affinity(self):
created_group = self.api.post_server_groups(self.soft_anti_affinity)
servers = self._boot_servers_to_group(created_group)
members = self.api.get_server_group(created_group['id'])['members']
self.assertEqual(2, len(servers))
self.assertIn(servers[0]['id'], members)
self.assertIn(servers[1]['id'], members)
self.assertNotEqual(servers[0]['OS-EXT-SRV-ATTR:host'],
servers[1]['OS-EXT-SRV-ATTR:host'])
def test_boot_servers_with_soft_anti_affinity_one_available_host(self):
self.compute2.kill()
created_group = self.api.post_server_groups(self.soft_anti_affinity)
servers = self._boot_servers_to_group(created_group)
members = self.api.get_server_group(created_group['id'])['members']
host = servers[0]['OS-EXT-SRV-ATTR:host']
for server in servers:
self.assertIn(server['id'], members)
self.assertEqual(host, server['OS-EXT-SRV-ATTR:host'])
def test_rebuild_with_soft_affinity(self):
untouched_server, rebuilt_server = self._rebuild_with_group(
self.soft_affinity)
self.assertEqual(untouched_server['OS-EXT-SRV-ATTR:host'],
rebuilt_server['OS-EXT-SRV-ATTR:host'])
def test_rebuild_with_soft_anti_affinity(self):
untouched_server, rebuilt_server = self._rebuild_with_group(
self.soft_anti_affinity)
self.assertNotEqual(untouched_server['OS-EXT-SRV-ATTR:host'],
rebuilt_server['OS-EXT-SRV-ATTR:host'])
def _migrate_with_soft_affinity_policies(self, group):
created_group = self.api.post_server_groups(group)
servers = self._boot_servers_to_group(created_group)
post = {'migrate': {}}
self.admin_api.post_server_action(servers[1]['id'], post)
migrated_server = self._wait_for_state_change(
self.admin_api, servers[1], 'VERIFY_RESIZE')
return [migrated_server['OS-EXT-SRV-ATTR:host'],
servers[0]['OS-EXT-SRV-ATTR:host']]
def test_migrate_with_soft_affinity(self):
migrated_server, other_server = (
self._migrate_with_soft_affinity_policies(self.soft_affinity))
self.assertNotEqual(migrated_server, other_server)
def test_migrate_with_soft_anti_affinity(self):
migrated_server, other_server = (
self._migrate_with_soft_affinity_policies(self.soft_anti_affinity))
self.assertEqual(migrated_server, other_server)
def _evacuate_with_soft_anti_affinity_policies(self, group):
created_group = self.api.post_server_groups(group)
servers = self._boot_servers_to_group(created_group)
host = self._get_compute_service_by_host_name(
servers[1]['OS-EXT-SRV-ATTR:host'])
host.stop()
# Need to wait service_down_time amount of seconds to ensure
# nova considers the host down
time.sleep(self._service_down_time)
post = {'evacuate': {}}
self.admin_api.post_server_action(servers[1]['id'], post)
self._wait_for_migration_status(servers[1], ['done'])
evacuated_server = self._wait_for_state_change(
self.admin_api, servers[1], 'ACTIVE')
# Note(gibi): need to get the server again as the state of the instance
# goes to ACTIVE first then the host of the instance changes to the
# new host later
evacuated_server = self.admin_api.get_server(evacuated_server['id'])
host.start()
return [evacuated_server['OS-EXT-SRV-ATTR:host'],
servers[0]['OS-EXT-SRV-ATTR:host']]
def test_evacuate_with_soft_affinity(self):
evacuated_server, other_server = (
self._evacuate_with_soft_anti_affinity_policies(
self.soft_affinity))
self.assertNotEqual(evacuated_server, other_server)
def test_evacuate_with_soft_anti_affinity(self):
evacuated_server, other_server = (
self._evacuate_with_soft_anti_affinity_policies(
self.soft_anti_affinity))
self.assertEqual(evacuated_server, other_server)
def test_soft_affinity_not_supported(self):
pass
| apache-2.0 | -3,361,309,554,332,246,000 | 42.179457 | 79 | 0.615727 | false |
indictranstech/internal-frappe | frappe/commands.py | 1 | 22056 | # Copyright (c) 2015, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, absolute_import
import sys
import os
import subprocess
import json
import click
import hashlib
import cProfile
import StringIO
import pstats
import frappe
import frappe.utils
from frappe.utils import cint
from distutils.spawn import find_executable
from functools import wraps
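# pass_context wraps a click command so the command receives ctx.obj as a
# frappe._dict (the CLI group is expected to have populated it with sites,
# verbose, force and profile) and, when profiling is enabled, runs the
# command under cProfile and prints the collected statistics.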
def pass_context(f):
@wraps(f)
def _func(ctx, *args, **kwargs):
profile = ctx.obj['profile']
if profile:
pr = cProfile.Profile()
pr.enable()
ret = f(frappe._dict(ctx.obj), *args, **kwargs)
if profile:
pr.disable()
s = StringIO.StringIO()
ps = pstats.Stats(pr, stream=s)\
.sort_stats('cumtime', 'tottime', 'ncalls')
ps.print_stats()
print s.getvalue()
return ret
return click.pass_context(_func)
def get_single_site(context):
if not len(context.sites) == 1:
print 'please select a site'
sys.exit(1)
site = context.sites[0]
return site
@click.command('new-site')
@click.argument('site')
@click.option('--db-name', help='Database name')
@click.option('--mariadb-root-username', default='root', help='Root username for MariaDB')
@click.option('--mariadb-root-password', help='Root password for MariaDB')
@click.option('--admin-password', help='Administrator password for new site', default=None)
@click.option('--verbose', is_flag=True, default=False, help='Verbose')
@click.option('--force', help='Force restore if site/database already exists', is_flag=True, default=False)
@click.option('--source_sql', help='Initiate database with a SQL file')
@click.option('--install-app', multiple=True, help='Install app after installation')
def new_site(site, mariadb_root_username=None, mariadb_root_password=None, admin_password=None, verbose=False, install_apps=None, source_sql=None, force=None, install_app=None, db_name=None):
"Install a new site"
if not db_name:
db_name = hashlib.sha1(site).hexdigest()[:10]
frappe.init(site=site)
_new_site(db_name, site, mariadb_root_username=mariadb_root_username, mariadb_root_password=mariadb_root_password, admin_password=admin_password, verbose=verbose, install_apps=install_app, source_sql=source_sql, force=force)
if len(frappe.utils.get_sites()) == 1:
use(site)
def _new_site(db_name, site, mariadb_root_username=None, mariadb_root_password=None, admin_password=None, verbose=False, install_apps=None, source_sql=None,force=False, reinstall=False):
"Install a new Frappe site"
from frappe.installer import install_db, make_site_dirs
from frappe.installer import install_app as _install_app
import frappe.utils.scheduler
frappe.init(site=site)
try:
# enable scheduler post install?
enable_scheduler = _is_scheduler_enabled()
except:
enable_scheduler = False
install_db(root_login=mariadb_root_username, root_password=mariadb_root_password, db_name=db_name, admin_password=admin_password, verbose=verbose, source_sql=source_sql,force=force, reinstall=reinstall)
make_site_dirs()
_install_app("frappe", verbose=verbose, set_as_patched=not source_sql)
if frappe.conf.get("install_apps"):
for app in frappe.conf.install_apps:
_install_app(app, verbose=verbose, set_as_patched=not source_sql)
if install_apps:
for app in install_apps:
_install_app(app, verbose=verbose, set_as_patched=not source_sql)
frappe.utils.scheduler.toggle_scheduler(enable_scheduler)
scheduler_status = "disabled" if frappe.utils.scheduler.is_scheduler_disabled() else "enabled"
print "*** Scheduler is", scheduler_status, "***"
frappe.destroy()
def _is_scheduler_enabled():
enable_scheduler = False
try:
frappe.connect()
enable_scheduler = cint(frappe.db.get_default("enable_scheduler"))
except:
pass
finally:
frappe.db.close()
return enable_scheduler
@click.command('restore')
@click.argument('sql-file-path')
@click.option('--mariadb-root-username', default='root', help='Root username for MariaDB')
@click.option('--mariadb-root-password', help='Root password for MariaDB')
@click.option('--db-name', help='Database name for site in case it is a new one')
@click.option('--admin-password', help='Administrator password for new site')
@click.option('--install-app', multiple=True, help='Install app after installation')
@pass_context
def restore(context, sql_file_path, mariadb_root_username=None, mariadb_root_password=None, db_name=None, verbose=None, install_app=None, admin_password=None, force=None):
"Restore site database from an sql file"
site = get_single_site(context)
frappe.init(site=site)
if not db_name:
db_name = frappe.conf.db_name
_new_site(db_name, site, mariadb_root_username=mariadb_root_username, mariadb_root_password=mariadb_root_password, admin_password=admin_password, verbose=context.verbose, install_apps=install_app, source_sql=sql_file_path, force=context.force)
@click.command('reinstall')
@pass_context
def reinstall(context):
"Reinstall site ie. wipe all data and start over"
site = get_single_site(context)
try:
frappe.init(site=site)
frappe.connect()
frappe.clear_cache()
installed = frappe.get_installed_apps()
frappe.clear_cache()
except Exception, e:
installed = []
finally:
if frappe.db:
frappe.db.close()
frappe.destroy()
frappe.init(site=site)
_new_site(frappe.conf.db_name, site, verbose=context.verbose, force=True, reinstall=True, install_apps=installed)
@click.command('install-app')
@click.argument('app')
@pass_context
def install_app(context, app):
"Install a new app to site"
from frappe.installer import install_app as _install_app
for site in context.sites:
frappe.init(site=site)
frappe.connect()
try:
_install_app(app, verbose=context.verbose)
finally:
frappe.destroy()
@click.command('add-system-manager')
@click.argument('email')
@click.option('--first-name')
@click.option('--last-name')
@pass_context
def add_system_manager(context, email, first_name, last_name):
"Add a new system manager to a site"
import frappe.utils.user
for site in context.sites:
frappe.connect(site=site)
try:
frappe.utils.user.add_system_manager(email, first_name, last_name)
frappe.db.commit()
finally:
frappe.destroy()
@click.command('migrate')
@click.option('--rebuild-website', help="Rebuild webpages after migration")
@pass_context
def migrate(context, rebuild_website=False):
"Run patches, sync schema and rebuild files/translations"
import frappe.modules.patch_handler
import frappe.model.sync
from frappe.utils.fixtures import sync_fixtures
import frappe.translate
from frappe.desk.notifications import clear_notifications
verbose = context.verbose
for site in context.sites:
print 'Migrating', site
frappe.init(site=site)
frappe.connect()
try:
prepare_for_update()
# run patches
frappe.modules.patch_handler.run_all()
# sync
frappe.model.sync.sync_all(verbose=context.verbose)
frappe.translate.clear_cache()
sync_fixtures()
clear_notifications()
if rebuild_website:
build_website()
finally:
frappe.destroy()
def prepare_for_update():
from frappe.sessions import clear_global_cache
clear_global_cache()
@click.command('run-patch')
@click.argument('module')
@pass_context
def run_patch(context, module):
"Run a particular patch"
import frappe.modules.patch_handler
for site in context.sites:
frappe.init(site=site)
try:
frappe.connect()
frappe.modules.patch_handler.run_single(module, force=context.force)
finally:
frappe.destroy()
@click.command('reload-doc')
@click.argument('module')
@click.argument('doctype')
@click.argument('docname')
@pass_context
def reload_doc(context, module, doctype, docname):
"Reload schema for a DocType"
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.reload_doc(module, doctype, docname, force=context.force)
frappe.db.commit()
finally:
frappe.destroy()
@click.command('build')
@click.option('--make-copy', is_flag=True, default=False, help='Copy the files instead of symlinking')
@click.option('--verbose', is_flag=True, default=False, help='Verbose')
def build(make_copy=False, verbose=False):
"Minify + concatenate JS and CSS files, build translations"
import frappe.build
import frappe
frappe.init('')
frappe.build.bundle(False, make_copy=make_copy, verbose=verbose)
@click.command('watch')
def watch():
"Watch and concatenate JS and CSS files as and when they change"
import frappe.build
frappe.init('')
frappe.build.watch(True)
@click.command('clear-cache')
@pass_context
def clear_cache(context):
"Clear cache, doctype cache and defaults"
import frappe.sessions
import frappe.website.render
from frappe.desk.notifications import clear_notifications
for site in context.sites:
try:
frappe.connect(site)
frappe.clear_cache()
clear_notifications()
frappe.website.render.clear_cache()
finally:
frappe.destroy()
@click.command('clear-website-cache')
@pass_context
def clear_website_cache(context):
"Clear website cache"
import frappe.website.render
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.website.render.clear_cache()
finally:
frappe.destroy()
@click.command('destroy-all-sessions')
@pass_context
def destroy_all_sessions(context):
"Clear sessions of all users (logs them out)"
import frappe.sessions
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.sessions.clear_all_sessions()
frappe.db.commit()
finally:
frappe.destroy()
@click.command('sync-www')
@pass_context
def sync_www(context):
"Sync files from static pages from www directory to Web Pages"
from frappe.website import statics
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
statics.sync_statics(rebuild=context.force)
frappe.db.commit()
finally:
frappe.destroy()
@click.command('build-website')
@pass_context
def build_website(context):
"Sync statics and clear cache"
from frappe.website import render, statics
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
render.clear_cache()
statics.sync(verbose=context.verbose).start(True)
frappe.db.commit()
finally:
frappe.destroy()
@click.command('setup-docs')
@click.argument('app')
@click.argument('docs-app')
@click.argument('path')
@pass_context
def setup_docs(context, app, docs_app, path):
"Setup docs in target folder of target app"
from frappe.utils.setup_docs import setup_docs
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
setup_docs(app, docs_app, path)
finally:
frappe.destroy()
@click.command('build-docs')
@click.argument('app')
@pass_context
def build_docs(context, app):
"Build docs from /src to /www folder in app"
from frappe.utils.autodoc import build
frappe.destroy()
for site in context.sites:
try:
frappe.init(site=site)
build(app)
finally:
frappe.destroy()
@click.command('reset-perms')
@pass_context
def reset_perms(context):
"Reset permissions for all doctypes"
from frappe.permissions import reset_perms
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
for d in frappe.db.sql_list("""select name from `tabDocType`
where ifnull(istable, 0)=0 and ifnull(custom, 0)=0"""):
frappe.clear_cache(doctype=d)
reset_perms(d)
finally:
frappe.destroy()
@click.command('execute')
@click.argument('method')
@pass_context
def execute(context, method):
"execute a function"
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
print frappe.local.site
ret = frappe.get_attr(method)()
if frappe.db:
frappe.db.commit()
finally:
frappe.destroy()
if ret:
print ret
@click.command('celery')
@click.argument('args')
def celery(args):
"Run a celery command"
python = sys.executable
os.execv(python, [python, "-m", "frappe.celery_app"] + args.split())
@click.command('trigger-scheduler-event')
@click.argument('event')
@pass_context
def trigger_scheduler_event(context, event):
"Trigger a scheduler event"
import frappe.utils.scheduler
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.utils.scheduler.trigger(site, event, now=context.force)
finally:
frappe.destroy()
@click.command('enable-scheduler')
@pass_context
def enable_scheduler(context):
"Enable scheduler"
import frappe.utils.scheduler
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.utils.scheduler.enable_scheduler()
frappe.db.commit()
print "Enabled for", site
finally:
frappe.destroy()
@click.command('disable-scheduler')
@pass_context
def disable_scheduler(context):
"Disable scheduler"
import frappe.utils.scheduler
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.utils.scheduler.disable_scheduler()
frappe.db.commit()
print "Disabled for", site
finally:
frappe.destroy()
@click.command('export-doc')
@click.argument('doctype')
@click.argument('docname')
@pass_context
def export_doc(context, doctype, docname):
"Export a single document to csv"
import frappe.modules
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.modules.export_doc(doctype, docname)
finally:
frappe.destroy()
@click.command('export-json')
@click.argument('doctype')
@click.argument('name')
@click.argument('path')
@pass_context
def export_json(context, doctype, name, path):
"Export doclist as json to the given path, use '-' as name for Singles."
from frappe.core.page.data_import_tool import data_import_tool
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
data_import_tool.export_json(doctype, name, path)
finally:
frappe.destroy()
@click.command('export-csv')
@click.argument('doctype')
@click.argument('path')
@pass_context
def export_csv(context, doctype, path):
"Dump DocType as csv"
from frappe.core.page.data_import_tool import data_import_tool
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
data_import_tool.export_csv(doctype, path)
finally:
frappe.destroy()
@click.command('export-fixtures')
@pass_context
def export_fixtures(context):
"export fixtures"
from frappe.utils.fixtures import export_fixtures
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
export_fixtures()
finally:
frappe.destroy()
@click.command('import-doc')
@click.argument('path')
@pass_context
def import_doc(context, path, force=False):
"Import (insert/update) doclist. If the argument is a directory, all files ending with .json are imported"
from frappe.core.page.data_import_tool import data_import_tool
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
data_import_tool.import_doc(path, overwrite=context.force)
finally:
frappe.destroy()
# translation
@click.command('build-message-files')
@pass_context
def build_message_files(context):
"Build message files for translation"
import frappe.translate
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.translate.rebuild_all_translation_files()
finally:
frappe.destroy()
@click.command('get-untranslated')
@click.argument('lang')
@click.argument('untranslated_file')
@click.option('--all', default=False, is_flag=True, help='Get all message strings')
@pass_context
def get_untranslated(context, lang, untranslated_file, all=None):
"Get untranslated strings for language"
import frappe.translate
site = get_single_site(context)
try:
frappe.init(site=site)
frappe.connect()
frappe.translate.get_untranslated(lang, untranslated_file, get_all=all)
finally:
frappe.destroy()
@click.command('update-translations')
@click.argument('lang')
@click.argument('untranslated_file')
@click.argument('translated-file')
@pass_context
def update_translations(context, lang, untranslated_file, translated_file):
"Update translated strings"
import frappe.translate
site = get_single_site(context)
try:
frappe.init(site=site)
frappe.connect()
frappe.translate.update_translations(lang, untranslated_file, translated_file)
finally:
frappe.destroy()
@click.command('set-admin-password')
@click.argument('admin-password')
@pass_context
def set_admin_password(context, admin_password):
"Set Administrator password for a site"
import getpass
for site in context.sites:
try:
frappe.init(site=site)
while not admin_password:
admin_password = getpass.getpass("Administrator's password for {0}: ".format(site))
frappe.connect()
frappe.db.sql("""update __Auth set `password`=password(%s)
where user='Administrator'""", (admin_password,))
frappe.db.commit()
admin_password = None
finally:
frappe.destroy()
@click.command('mysql')
@pass_context
def mysql(context):
"Start Mariadb console for a site"
site = get_single_site(context)
frappe.init(site=site)
msq = find_executable('mysql')
os.execv(msq, [msq, '-u', frappe.conf.db_name, '-p'+frappe.conf.db_password, frappe.conf.db_name, '-h', frappe.conf.db_host or "localhost", "-A"])
@click.command('console')
@pass_context
def console(context):
"Start ipython console for a site"
site = get_single_site(context)
frappe.init(site=site)
frappe.connect()
import IPython
IPython.embed()
@click.command('run-tests')
@click.option('--app')
@click.option('--doctype')
@click.option('--test', multiple=True)
@click.option('--driver')
@click.option('--module')
@pass_context
def run_tests(context, app=None, module=None, doctype=None, test=(), driver=None):
"Run tests"
import frappe.test_runner
from frappe.utils import sel
tests = test
site = get_single_site(context)
# sel.start(verbose, driver)
try:
frappe.init(site=site)
ret = frappe.test_runner.main(app, module, doctype, context.verbose, tests=tests, force=context.force)
if len(ret.failures) == 0 and len(ret.errors) == 0:
ret = 0
finally:
pass
# sel.close()
sys.exit(ret)
@click.command('serve')
@click.option('--port', default=8000)
@click.option('--profile', is_flag=True, default=False)
@pass_context
def serve(context, port=None, profile=False, sites_path='.', site=None):
"Start development web server"
if not context.sites:
site = None
else:
site = context.sites[0]
import frappe.app
frappe.app.serve(port=port, profile=profile, site=site, sites_path='.')
@click.command('request')
@click.argument('args')
@pass_context
def request(context, args):
"Run a request as an admin"
import frappe.handler
import frappe.api
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
if "?" in args:
frappe.local.form_dict = frappe._dict([a.split("=") for a in args.split("?")[-1].split("&")])
else:
frappe.local.form_dict = frappe._dict()
if args.startswith("/api/method"):
frappe.local.form_dict.cmd = args.split("?")[0].split("/")[-1]
frappe.handler.execute_cmd(frappe.form_dict.cmd)
print frappe.response
finally:
frappe.destroy()
@click.command('doctor')
def doctor():
"Get untranslated strings for lang."
from frappe.utils.doctor import doctor as _doctor
frappe.init('')
return _doctor()
@click.command('purge-all-tasks')
def purge_all_tasks():
"Purge any pending periodic tasks of 'all' event. Doesn't purge hourly, daily and weekly"
from frappe.utils.doctor import purge_pending_tasks
count = purge_pending_tasks()
print "Purged {} tasks".format(count)
@click.command('dump-queue-status')
def dump_queue_status():
"Dump detailed diagnostic infomation for task queues in JSON format"
from frappe.utils.doctor import dump_queue_status as _dump_queue_status
print json.dumps(_dump_queue_status(), indent=1)
@click.command('make-app')
@click.argument('destination')
@click.argument('app_name')
def make_app(destination, app_name):
from frappe.utils.boilerplate import make_boilerplate
make_boilerplate(destination, app_name)
@click.command('use')
@click.argument('site')
def _use(site, sites_path='.'):
use(site, sites_path=sites_path)
def use(site, sites_path='.'):
with open(os.path.join(sites_path, "currentsite.txt"), "w") as sitefile:
sitefile.write(site)
@click.command('backup')
@click.option('--with-files', default=False, is_flag=True, help="Take backup with files")
@pass_context
def backup(context, with_files=False, backup_path_db=None, backup_path_files=None, quiet=False):
"Backup"
from frappe.utils.backups import scheduled_backup
verbose = context.verbose
for site in context.sites:
frappe.init(site=site)
frappe.connect()
odb = scheduled_backup(ignore_files=not with_files, backup_path_db=backup_path_db, backup_path_files=backup_path_files, force=True)
if verbose:
from frappe.utils import now
print "database backup taken -", odb.backup_path_db, "- on", now()
if with_files:
print "files backup taken -", odb.backup_path_files, "- on", now()
frappe.destroy()
@click.command('remove-from-installed-apps')
@click.argument('app')
@pass_context
def remove_from_installed_apps(context, app):
from frappe.installer import remove_from_installed_apps
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
remove_from_installed_apps(app)
finally:
frappe.destroy()
# commands = [
# new_site,
# restore,
# install_app,
# run_patch,
# migrate,
# add_system_manager,
# celery
# ]
commands = [
new_site,
restore,
reinstall,
install_app,
add_system_manager,
migrate,
run_patch,
reload_doc,
build,
watch,
clear_cache,
clear_website_cache,
destroy_all_sessions,
sync_www,
build_website,
setup_docs,
build_docs,
reset_perms,
execute,
celery,
trigger_scheduler_event,
enable_scheduler,
disable_scheduler,
export_doc,
export_json,
export_csv,
export_fixtures,
import_doc,
build_message_files,
get_untranslated,
update_translations,
set_admin_password,
mysql,
run_tests,
serve,
request,
doctor,
purge_all_tasks,
dump_queue_status,
console,
make_app,
_use,
backup,
remove_from_installed_apps,
]
| mit | 7,184,798,285,555,891,000 | 26.501247 | 245 | 0.7286 | false |
Septima/qgis-qlrbrowser | src/QlrBrowser/mysettings/qgissettingmanager/types/integer.py | 1 | 3535 | #-----------------------------------------------------------
#
# QGIS setting manager is a python module to easily manage read/write
# settings and set/get corresponding widgets.
#
# Copyright : (C) 2013 Denis Rouzaud
# Email : [email protected]
#
#-----------------------------------------------------------
#
# licensed under the terms of GNU GPL 2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#---------------------------------------------------------------------
# for combobox, the value corresponds to the index of the combobox
from PyQt5.QtWidgets import QLineEdit, QSpinBox, QSlider, QComboBox
from qgis.core import QgsProject
from ..setting import Setting
from ..setting_widget import SettingWidget
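# Integer maps an integer setting onto a Qt widget: QLineEdit (text parsed to
# int), QSpinBox/QSlider (numeric value) or QComboBox (the stored value is the
# selected index), using the SettingWidget subclasses defined below.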
class Integer(Setting):
def __init__(self, name, scope, default_value, options={}):
Setting.__init__(self, name, scope, default_value, int, QgsProject.instance().readNumEntry, QgsProject.instance().writeEntry, options)
def check(self, value):
if type(value) != int and type(value) != float:
raise NameError("Setting %s must be an integer." % self.name)
def config_widget(self, widget):
if type(widget) == QLineEdit:
return LineEditIntegerWidget(self, widget, self.options)
elif type(widget) in (QSpinBox, QSlider):
return SpinBoxIntegerWidget(self, widget, self.options)
elif type(widget) == QComboBox:
return ComboBoxIntegerWidget(self, widget, self.options)
else:
print(type(widget))
raise NameError("SettingManager does not handle %s widgets for integers for the moment (setting: %s)" %
(type(widget), self.name))
class LineEditIntegerWidget(SettingWidget):
def __init__(self, setting, widget, options):
signal = widget.textChanged
SettingWidget.__init__(self, setting, widget, options, signal)
def set_widget_value(self, value):
self.widget.setText('{}'.format(value))
def widget_value(self):
try:
value = int(self.widget.text())
except ValueError:
value = None
return value
class SpinBoxIntegerWidget(SettingWidget):
def __init__(self, setting, widget, options):
signal = widget.valueChanged
SettingWidget.__init__(self, setting, widget, options, signal)
def set_widget_value(self, value):
self.widget.setValue(value)
def widget_value(self):
return self.widget.value()
class ComboBoxIntegerWidget(SettingWidget):
def __init__(self, setting, widget, options):
signal = widget.currentIndexChanged
SettingWidget.__init__(self, setting, widget, options, signal)
def set_widget_value(self, value):
self.widget.setCurrentIndex(value)
def widget_value(self):
return self.widget.currentIndex()
| gpl-2.0 | -6,054,332,582,927,886,000 | 34 | 142 | 0.645545 | false |
DINA-Web/datasets | collections-data/transformations/prepare_geography.py | 1 | 4838 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import numpy
import pandas
"""
Script for preparing geography data. Writes output to geography.csv.
Run from commandline, like this:
prepare_geography.py treedef_filename basetree_filename [--sweden filename]
"""
def append_country_geography(frame, country_frame, country):
country_id = frame.ID[frame.Name==country].values[0]
country_frame.ID = (
country_frame.ID.astype(int) + max(frame.ID.astype('int')))
country_frame.ParentID = (
country_frame.ID.astype(int).fillna(0) +
max(frame.ID.astype(int)))
country_frame.loc[country_frame.Name==country, 'ID'] = country_id
frame = pandas.concat([frame, country_frame])
frame.drop_duplicates(subset='ID', inplace=True)
return frame
if __name__ == '__main__':
help_text = 'Transform geography data import to Specify'
parser = argparse.ArgumentParser(description=help_text)
parser.add_argument(
dest='treedefitems',
type=argparse.FileType('r'),
help='path to file with tree definition items')
parser.add_argument(
dest='basetree',
type=argparse.FileType('r'),
help='path to file with the base tree')
parser.add_argument(
'--denmark',
dest='denmark',
type=argparse.FileType('r'),
metavar='filename',
help='path to file with geography for Denmark')
parser.add_argument(
'--finland',
dest='finland',
type=argparse.FileType('r'),
metavar='filename',
help='path to file with geography for Finland')
parser.add_argument(
'--norway',
dest='norway',
type=argparse.FileType('r'),
metavar='filename',
help='path to file with geography for Norway')
parser.add_argument(
'--sweden',
dest='sweden',
type=argparse.FileType('r'),
metavar='filename',
help='path to file with geography for Sweden')
arguments = parser.parse_args()
rank_names = {
'L0 earth' : 'Earth',
'L1 continent': 'Continent',
'L2 region': 'Region',
'L3 area': 'Area',
'L4 country': 'Country',
'L5 province': 'State',
'L6 district': 'County'}
output_columns = [
'geography_sourceid',
'parent_sourceid',
'name',
'geographytreedefitem_sourceid']
treedefitems = pandas.read_csv(arguments.treedefitems, dtype='unicode')
basetree = pandas.read_csv(arguments.basetree, dtype='unicode')
# Add root node
root_id = min(basetree.ID.astype(int) - 1)
basetree.loc[basetree.ParentID.isnull(), 'ParentID'] = root_id
number_to_add = 1 - root_id
basetree.ID = basetree.ID.astype(int) + number_to_add
basetree.ParentID = basetree.ParentID.astype(int) + number_to_add
basetree = basetree.append({
'ID': root_id + number_to_add,
'ParentID': numpy.nan,
'Name': 'Earth',
'Category': 'L0 earth'}, ignore_index=True)
basetree = basetree[['ID', 'ParentID', 'Name', 'Category']]
if arguments.denmark:
geo_den = pandas.read_csv(arguments.denmark, dtype='unicode')
geo_den = geo_den[['ID', 'ParentID', 'Name', 'Category']]
basetree = append_country_geography(basetree, geo_den, 'Denmark')
if arguments.finland:
geo_fin = pandas.read_csv(arguments.finland, dtype='unicode')
geo_fin = geo_fin[['ID', 'ParentID', 'Name', 'Category']]
basetree = append_country_geography(basetree, geo_fin, 'Finland')
if arguments.norway:
geo_nor = pandas.read_csv(arguments.norway, dtype='unicode')
geo_nor = geo_nor[['ID', 'ParentID', 'Name', 'Category']]
basetree = append_country_geography(basetree, geo_nor, 'Norway')
if arguments.sweden:
geo_swe = pandas.read_csv(arguments.sweden, dtype='unicode')
geo_swe = geo_swe[['ID', 'ParentID', 'Name', 'Category']]
basetree = append_country_geography(basetree, geo_swe, 'Sweden')
basetree['Category'] = basetree['Category'].replace(rank_names)
treedefitems_merge = treedefitems[[
'geographytreedefitem_sourceid',
'name']].rename(columns={'name': 'Category'})
geography = basetree.merge(
treedefitems_merge, how='inner', on='Category')
geography.rename(columns={
'Name': 'name',
'ID': 'geography_sourceid',
'ParentID': 'parent_sourceid'}, inplace=True)
geography.geography_sourceid = geography.geography_sourceid.astype(int)
geography.sort_values(by='geography_sourceid', inplace=True)
geography.parent_sourceid = (
geography.parent_sourceid.dropna().astype(int).astype(str))
    geography[output_columns].to_csv('geography.csv', index=False, float_format='%g')
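# Example invocation (file names are placeholders):
#   python prepare_geography.py treedefitems.csv basetree.csv --sweden geo_sweden.csv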
| cc0-1.0 | -5,945,689,383,502,212,000 | 31.039735 | 79 | 0.622778 | false |
jeKnowledge/horarios-inforestudante | TimetableMaker.py | 1 | 8893 | from itertools import combinations
# Receives a dictionary of courses with the structure:
# { CLASS_ID:{
# T:{
# CLASS_T1,
# CLASS_T...
# },
# TP:{
# CLASS_TP...
# },
# ...
# }, ...
# }
# Returns an array of all possible combinations of class sections
# Elements in the dictionary are referred to by tuples: (aula, tipo, turma)
# IGNORES OVERLAPS
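# Illustrative call (course and section IDs are made up):
#   possibleCombinations({1001: {"T": {"T1": [...], "T2": [...]}, "TP": {"TP1": [...]}}})
#   -> e.g. (((1001, "T", "T1"), (1001, "TP", "TP1")), ...) pairing one T with one TP per course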
def possibleCombinations(dictionary):
    # Valid section combinations (all class types present)
    # i.e. for each course we need to build the combinations of its sections
combTurmasValidas = []
aulas = [] # List de todas as aulas por numero
    #Build the combinations within each course
for aula in dictionary:
turmas = [] # List de de todas as turmas nesta aula (disciplina), como tuple
tipos = [] # Tipos de aula (T/TP/PL)
for tipo in dictionary[aula]:
tipos.append(tipo)
for turma in dictionary[aula][tipo]:
turmas.append((aula, tipo, turma))
combTurmas = combinations(turmas, len(tipos)) # Todas as combinacoes possiveis, incluindo (TP,TP,TP,TP)
for comb in combTurmas:
tiposNaComb = [] # Quais os tipos de aula nesta combinacao; deverao ser todos
for turma in comb:
tipo = turma[1] # Cada turma é representada por uma tuple (aula, tipo, turma); turma[1] devolve tipo
if tipo not in tiposNaComb:
tiposNaComb.append(tipo)
            #If the combination does not include every class type it is not valid
if set(tiposNaComb) != set(tipos):
continue
combTurmasValidas.append(comb)
aulas.append(aula)
    # Build combinations of courses, taking the "legal" section combinations into account
    # By the same process used for the sections:
    # Build every possible combination and remove those that do not include all courses
combAulas = combinations(combTurmasValidas, len(aulas))
combAulasValidas = [] # Todas as combinacoes de turmas
for comb in combAulas:
aulasInComb = [] # List de aulas incluidas nesta combinacao; deverao ser todas
for turmaComb in comb: # Combinacao de turmas para uma aula; tira-se o id da aula pelo primeiro elemento
if turmaComb[0][0] not in aulasInComb:
aulasInComb.append(turmaComb[0][0]) # comb[0] == (aula, tipo, turma); tuple[0] == aula
        # If this combination of sections does not include all courses, it is not valid
if set(aulasInComb) != set(aulas):
continue
        # Check that the combination does not already exist in another order
existe = False
for combValida in combAulasValidas:
if set(combValida) == set(comb):
existe = True
break
if existe:
continue
combAulasValidas.append(comb)
return combAulasValidas
# Receives input:
# Dictionary:
# { CLASS_ID:{
# T:{
# [T1_obj, T1_obj, T1_obj,...],
# CLASS_T...
# },
# TP:{
# CLASS_TP...
# },
# ...
# }, ...
# Valid combinations of classes:
# ( ( ((aula1, tipo1, turma1), (aula1, tipo2, turma1)), ((aula2, tipo1, turma1), (aula2, tipo2, turma1)) ), ... )
# Checks whether classes overlap and, if they do, removes those combinations
# Returns a list of combinations without overlaps
def removeOverlaps(dictionary, validCombinations):
noOverlaps = [] # Resultado a devolver
for comb in validCombinations:
turmas = [] # turmas com "coordenadas", sob a forma (horaInicio, horaFim, (aula, tipo, turma))
        # Create time tuples and put them in the array
for aulaComb in comb:
for turma in aulaComb:
aulas = dictionary[turma[0]][turma[1]][turma[2]] # Tirar objetos Aula do dicionario (multiplos!)
for aula in aulas:
                    # Create a tuple with start/end times, day and section (course/type/section)
ref = (aula.horaInicio, aula.horaFim, aula.dia, (turma[0], turma[1], turma[2]))
turmas.append(ref)
        # Create pairs
todosPares = combinations(turmas, 2)
pares = []
        # Remove pairs formed from the same section
for par in todosPares:
            # Check that the sections are different
turmaA = par[0][3]
turmaB = par[1][3]
if turmaA[0] != turmaB[0] or turmaA[1] != turmaB[1] or turmaA[2] != turmaB[2]:
pares.append(par)
        # Check each pair for overlap
combSemSobreposicoes = True
for par in pares:
a = par[0]
b = par[1]
            # Different days?
if a[2] != b[2]:
continue
cedo = min(a[0], b[0])
tarde = max(a[1], b[1])
delta = tarde - cedo
            # The classes overlap
if a[1]-a[0]+b[1]-b[0] > delta:
combSemSobreposicoes = False
break
if combSemSobreposicoes:
noOverlaps.append(comb)
return noOverlaps
from openpyxl import Workbook
from openpyxl.styles import Color, PatternFill, Style, Fill, Font
from random import randint
# Receives input:
# Dictionary:
# { CLASS_ID:{
# T:{
# [T1_obj, T1_obj, T1_obj,...],
# CLASS_T...
# },
# TP:{
# CLASS_TP...
# },
# ...
# }, ...
# Combinations of classes:
# ( ( ((aula1, tipo1, turma1), (aula1, tipo2, turma1)), ((aula2, tipo1, turma1), (aula2, tipo2, turma1)) ), ... )
# Saves an xlsx file (output.xlsx)
# Returns an openpyxl workbook
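# Typical pipeline: outputExcel(data, removeOverlaps(data, possibleCombinations(data)))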
def outputExcel(dictionary, combinations):
if len(combinations) == 0:
print("No combinations!")
return
wb = Workbook()
wb.remove_sheet(wb.active) # Apagar folha default
combinationNumber = 0
for comb in combinations:
ws = wb.create_sheet(str(combinationNumber)) # Criar uma nova folha com um id para referencia
        # Day labels
ws['B1'] = "Segunda"
ws['C1'] = "Terça"
ws['D1'] = "Quarta"
ws['E1'] = "Quinta"
ws['F1'] = "Sexta"
ws['G1'] = "Sabado"
ws['H1'] = "Domingo"
        # Hour labels (every 30 minutes, from 8:00 to 22:00)
i = 2
for n in range(80,220,5):
ws['A'+str(i)] = str(int(n/10)) + "h" + str(int(((n/10)%1)*60)) + "m"
i += 1
        # Draw the classes
for disciplina in comb:
for coord in disciplina:
aulaObjList = dictionary[coord[0]][coord[1]][coord[2]]
for aulaObj in aulaObjList:
                    # Take half an hour off the end so the merged ranges do not overlap
cellRange = diaParaLetra(aulaObj.dia) + horaParaNumero(aulaObj.horaInicio) + ":"\
+ diaParaLetra(aulaObj.dia) + horaParaNumero(aulaObj.horaFim - 0.5)
ws.merge_cells(cellRange)
# Add label
ws[diaParaLetra(aulaObj.dia) + horaParaNumero(aulaObj.horaInicio)] = aulaObj.aulaNome +\
"," + aulaObj.turma
combinationNumber += 1 # Para referencia
wb.save('output.xlsx')
return wb
# ______ Helper functions for output: _________
def diaParaLetra(dia):
if dia == 0:
return "B"
if dia == 1:
return "C"
if dia == 2:
return "D"
if dia == 3:
return "E"
if dia == 4:
return "F"
if dia == 5:
return "G"
if dia == 6:
return "H"
def horaParaNumero(hora):
delta = hora - 8
return str(int(delta/0.5) + 2)
# _____________________________________________
# XLSXtoHTMLdemo
# Program to convert the data from an XLSX file to HTML.
# Uses the openpyxl library.
# Author: Vasudev Ram - http://www.dancingbison.com
# Altered by Miguel Murça for the purposes of this program
import openpyxl
from openpyxl import load_workbook
def convertExcelToWeb(workbook):
worksheets = workbook._sheets
for worksheet in worksheets:
html_data = """
<html>
<head>
<title>
Horario
</title>
<head>
<body>
<table>
"""
ws_range = worksheet.iter_rows('A1:I30')
for row in ws_range:
html_data += "<tr>"
for cell in row:
if cell.value is None:
html_data += "<td>" + ' ' + "<td>"
else:
html_data += "<td>" + str(cell.value) + "<td>"
html_data += "<tr>"
html_data += "</table>\n</body>\n</html>"
with open(worksheet.title + ".html", "w") as html_fil:
html_fil.write(html_data)
# EOF | mit | -8,867,499,705,870,523,000 | 29.337884 | 117 | 0.540279 | false |
pinballwizard/phone | sms/migrations/false/0005_subscriber.py | 1 | 1070 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-24 04:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sms', '0004_smssended_delivered'),
]
operations = [
migrations.CreateModel(
name='Subscriber',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mobile', models.CharField(max_length=11, unique=True, verbose_name='Номер телефона')),
('account', models.CharField(max_length=12, verbose_name='Лицевой счет')),
('blocked', models.BooleanField(verbose_name='Заблокирован')),
('ban_date', models.DateField(verbose_name='Дата блокировки')),
],
options={
'verbose_name': 'Абонент',
'verbose_name_plural': 'Абоненты',
},
),
]
| lgpl-3.0 | -2,315,646,766,958,779,400 | 33.655172 | 114 | 0.566169 | false |
EndPointCorp/lg_ros_nodes | lg_volume_control/test/test_volume_control.py | 1 | 3071 | #!/usr/bin/env python3
PKG = 'lg_volume_control'
NAME = 'test_volume_control'
import unittest
import os
from lg_volume_control import VolumeControlMaster
from std_msgs.msg import UInt8, Int8
from lg_common.helpers import write_log_to_file
class MockPub:
def __init__(self):
self.data = []
def publish(self, msg):
self.data.append(msg)
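# MockPub is a minimal stand-in for the publisher that VolumeControlMaster
# expects; it records every published message so the tests can inspect the
# most recent value.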
class TestVolumeControl(unittest.TestCase):
def setUp(self):
self.mock_pub = MockPub()
# setting scale to 1 to make tests easier to read
self.volume_controller = VolumeControlMaster(self.mock_pub, scale=1)
def test_initial_volume(self):
"""
Volume is initially clamped between default/2 and default
"""
self.assertEqual(len(self.mock_pub.data), 1)
write_log_to_file("%s" % self.mock_pub.data[0])
self.assertGreaterEqual(self.mock_pub.data[0], self.volume_controller.default_volume / 2)
self.assertLessEqual(self.mock_pub.data[0], self.volume_controller.default_volume)
def test_max_volume(self):
"""
Grab the initial data from self.data[0] and then add just enough to be 100
and then check that the new volume is 100, then add more than 100 and
check that the volume returned does not exceed 100
"""
current_volume = self.mock_pub.data[0]
# calculate the ammount needed to increment the current volume
increment = 100 - current_volume
# craft a message and call the handle_volume callback
increment_message = Int8()
increment_message.data = increment
self.volume_controller.handle_change_volume(increment_message)
# check that we are at 100 volume now
self.assertEqual(self.mock_pub.data[-1], 100)
# increment another 1 volume
increment_message.data = 1
self.volume_controller.handle_change_volume(increment_message)
# check that we are _still_ 100 (we should never go above 100 volume)
self.assertEqual(self.mock_pub.data[-1], 100)
def test_min_volume(self):
"""
similar to test_max_volume, but checking that we don't go below 0
"""
current_volume = self.mock_pub.data[0]
        # calculate the amount needed to increment the current volume
increment = 0 - current_volume
# craft a message and call the handle_volume callback
increment_message = Int8()
increment_message.data = increment
self.volume_controller.handle_change_volume(increment_message)
# check that we are at 0 volume now
self.assertEqual(self.mock_pub.data[-1], 0)
# decrement another 1 volume
increment_message.data = -1
self.volume_controller.handle_change_volume(increment_message)
# check that we are _still_ 0 (we should never go below 0 volume)
self.assertEqual(self.mock_pub.data[-1], 0)
if __name__ == '__main__':
import rostest
rostest.rosrun(PKG, NAME, TestVolumeControl)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| apache-2.0 | -1,981,711,366,369,654,300 | 33.122222 | 97 | 0.659069 | false |
googlecodelabs/nest-tensorflow | codelab/classify.py | 1 | 3339 | #!/usr/bin/python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib2
import os.path
import numpy as np
import tensorflow as tf
from node_lookup import NodeLookup
from errors import error_result
snapshot_file = 'tmp/tmp.jpg'
model_dir = 'tmp/imagenet'
num_top_predictions = 5
def classify_remote_image(image_url):
# Attempt to Download
try:
image = download_image(image_url)
except IOError:
return error_result("Camera's Snapshot URL could not be downloaded")
# Attempt to Classify
try:
results = run_inference_on_image(image)
except:
return error_result("Could not classify the image")
return {
"image_url": image_url,
"results": results
}
def create_graph():
with tf.gfile.FastGFile(os.path.join(
model_dir, 'classify_image_graph_def.pb'
), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
def run_inference_on_image(image):
"""Runs inference on an image.
Args:
image: Image file name.
Returns:
        A dict mapping human-readable label strings to their prediction scores.
"""
if not tf.gfile.Exists(image):
tf.logging.fatal('File does not exist %s', image)
image_data = tf.gfile.FastGFile(image, 'rb').read()
# Creates graph from saved GraphDef.
create_graph()
with tf.Session() as sess:
# Some useful tensors:
# 'softmax:0': A tensor containing the normalized prediction across
# 1000 labels.
# 'pool_3:0': A tensor containing the next-to-last layer containing 2048
# float description of the image.
# 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG
# encoding of the image.
# Runs the softmax tensor by feeding the image_data as input to the graph.
softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
predictions = sess.run(softmax_tensor,
{'DecodeJpeg/contents:0': image_data})
predictions = np.squeeze(predictions)
# Creates node ID --> English string lookup.
node_lookup = NodeLookup()
top_k = predictions.argsort()[-num_top_predictions:][::-1]
results = {}
for node_id in top_k:
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
results[human_string] = float(score)
return results
def download_image(url):
# Downloads the image from the specified URL to the filesystem
response = urllib2.urlopen(url)
body = response.read()
if body == '':
raise IOError('The Snapshot URL did not contain any HTTP body when fetched')
with open(snapshot_file, 'w') as f:
f.write(body)
return snapshot_file
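# Example usage (hypothetical snapshot URL):
#   result = classify_remote_image('https://example.com/camera/snapshot.jpg')
#   # -> {"image_url": ..., "results": {label: score, ...}} or an error_result payload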
| apache-2.0 | 8,367,322,700,183,847,000 | 30.5 | 84 | 0.649596 | false |
mikejthomas/biote100_pset2 | mode.py | 1 | 2238 | #Python Problem 3
#mode.py
#Introduction to Bioinformatics Assignment 2
#Purpose:Calculating the mode of a dataset
#Your Name: Michael Thomas
#Date: 10/12/15
#will contain all the data of a list including some unwanted carriage return symbols
tempList = []
#will contain the data as floats after it is stripped and the header is removed
myData = []
#will store the header
header = ""
#this will store the value of the mode
mode = 0
#TASK1
#We have the file data.txt read all the values of this file in the list tempList
#open data.txt and save to a tempList
text_file = open('data.txt' , 'r')
tempList = text_file.readlines()
text_file.close()
print tempList
#TASK2
#We don't want the header to be in the list Pop it and save it into the header variable
#pop out header (position 0 in list tempList) and save to header.
header = tempList.pop(0).strip()
print header
#TASK3
#for every member of tempList, clean it up from carriage return, convert it into a float and add it to the list myData
#Using list comprehension, we delete all '\r\n' from each line in tempList
myData = [line.rstrip('\r\n') for line in tempList]
#Similar to the list comprehension above, we convert all items to floats
myData = [float(i) for i in myData]
print myData
#print type(myData[1])
#TASK4
#Sort the list myData
myData.sort()
print myData
#TASK5
#using the list of floats myData Find the MODE of the data
#The mode of a dataset is the number that occurs most frequently.
#i.e. in the list [2,2,3,3,3,4] the mode is 3
#create dictionary myDic
myDic = {}
#using exceptions, we can increment the count for a key when a repeat
#value is found, or initialise it to 1 when the value is new
#this will create a counter dictionary with one key per distinct
#value in myData, mapped to the number of times
#that each value appears in myData
for i in myData:
try:
myDic[i] += 1
except:
myDic[i] = 1
print myDic
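#note: the same tally could be built with the standard library, e.g.
#   from collections import Counter
#   myDic = dict(Counter(myData))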
#find the highest count in the dictionary
#the key(s) with this count are the mode
maxval = max(myDic.values())
#for loop to print the key for the
#corresponding value of maxval which will
#be the mode for the dataset
for key, value in myDic.items():
if value == maxval:
mode = key
#print results
print "\n"
print "The mode of the:", header, " dataset is: ", mode
print "\n"
| mit | -4,476,699,867,430,466,000 | 28.447368 | 118 | 0.743074 | false |
ddragon15/Overlooked-OvercookedFangame-WIP- | items.py | 1 | 4159 | import pygame
from pygame.locals import *
import math
import random
import magic
class All():
DebugBool = False
DebugV = [0,0]
isHold = False
isOccupied = False
processable = True
def Draw(self):
playerrot = pygame.transform.rotate(self.image ,self.rot)
playerpos1 = (self.pos[0]-32, self.pos[1]-32)
magic.mapScreen.blit(playerrot, playerpos1)
# pygame.draw.rect(magic.mapScreen, (50,50,131), pygame.Rect((x,y),(64,64)))
def checkCollision(self, pos):
boxrect = pygame.Rect((pos[0],pos[1]),(20,20))
myRect = pygame.Rect((self.pos[0]-16,self.pos[1]-16),(34,34))
# self.DebugV = myRect
# self.DebugBool = True
boxrect.topleft = [pos[0],pos[1]]
if myRect.colliderect(boxrect):
return True
else:
return False
def Debug(self):
if self.DebugBool:
pygame.draw.rect(magic.mapScreen, (50,250,131), self.DebugV)
# self.DebugV[0] = self.pos[0]-8
# self.DebugV[1] = self.y-8
# self.DebugBool = True
def setPos(self, pos):
self.pos = [pos[0]+8,pos[1]+8]
class Onion(All):
tag = "onion"
def __init__(self, x, y):
self.skin = "resources/images/onion.png"
self.image = pygame.image.load(self.skin)
# w,h = self.image.get_size()
# self.image = pygame.transform.scale(self.image, (int(w),int(h)))
self.rect = self.image.get_rect()
self.rect.topleft = [x,y]
self.pos = [x+32,y+32]
self.rot = 0 #random.randint(0, 360)*1
def Update(self):
All.Draw(self)
All.Debug(self)
def changeSkin(self):
        if self.skin != "resources/images/onionS.png":
self.skin = "resources/images/onionS.png"
self.image = pygame.image.load(self.skin)
class Tomato(All):
tag = "tomato"
def __init__(self, x, y):
self.skin = "resources/images/tomato.png"
self.image = pygame.image.load(self.skin)
# w,h = self.image.get_size()
# self.image = pygame.transform.scale(self.image, (int(w),int(h)))
self.rect = self.image.get_rect()
self.rect.topleft = [x,y]
self.pos = [x+32,y+32]
self.rot = 0 #random.randint(0, 360)*1
def Update(self):
All.Draw(self)
All.Debug(self)
def changeSkin(self):
        if self.skin != "resources/images/tomatoS.png":
self.skin = "resources/images/tomatoS.png"
self.image = pygame.image.load(self.skin)
class Lettuce(All):
tag = "lettuce"
def __init__(self, x, y):
self.skin = "resources/images/lettuce.png"
self.image = pygame.image.load(self.skin)
# w,h = self.image.get_size()
# self.image = pygame.transform.scale(self.image, (int(w),int(h)))
self.rect = self.image.get_rect()
self.rect.topleft = [x,y]
self.pos = [x+32,y+32]
self.rot = 0 #random.randint(0, 360)*1
def Update(self):
All.Draw(self)
All.Debug(self)
def changeSkin(self):
        if self.skin != "resources/images/lettuceS.png":
self.skin = "resources/images/lettuceS.png"
self.image = pygame.image.load(self.skin)
class Plate(All):
processable = False
# TODO make states for different Foods
def __init__(self, x, y):
self.skin = "resources/images/plate.png"
self.image = pygame.image.load(self.skin)
self.rect = self.image.get_rect()
self.rect.topleft = [x,y]
self.pos = [x+32,y+32]
self.rot = 0
def Update(self):
All.Draw(self)
All.Debug(self)
# TODO Plate states
    # If an item sits on top of the Plate
    # Loop through combinations of all ingredients on the plate plus the new one
    # Take the first recipe that all ingredients work for
    # Consume the item (delete it)
    # Change Skin
    # TODO Make a map out of all recipes (maybe in another File)
    # Which items are needed?
    # Can it be processed by something?
    # Which state is the plate in? Choose Skin for swap and return it
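    # One possible shape for such a recipe map (illustrative only, not used anywhere yet):
    #   RECIPES = {
    #       "salad": {"ingredients": {"lettuce", "tomato"}, "needs_chopping": True},
    #       "onion_soup": {"ingredients": {"onion"}, "needs_cooking": True},
    #   }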
| gpl-3.0 | 3,271,743,681,788,671,000 | 29.580882 | 89 | 0.582592 | false |
TaskEvolution/Task-Coach-Evolution | taskcoach/taskcoachlib/gui/splash.py | 1 | 1689 | '''
Task Coach - Your friendly task manager
Copyright (C) 2004-2013 Task Coach developers <[email protected]>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import wx
from taskcoachlib import i18n
try:
import icons
except ImportError: # pragma: no cover
print "ERROR: couldn't import icons.py."
print 'You need to generate the icons file.'
print 'Run "make prepare" in the Task Coach root folder.'
import sys
sys.exit(1)
class SplashScreen(wx.SplashScreen):
def __init__(self):
splash = icons.catalog['splash']
if i18n.currentLanguageIsRightToLeft():
# RTL languages cause the bitmap to be mirrored too, but because
# the splash image is not internationalized, we have to mirror it
# (back). Unfortunately using SetLayoutDirection() on the
# SplashWindow doesn't work.
bitmap = wx.BitmapFromImage(splash.getImage().Mirror())
else:
bitmap = splash.getBitmap()
super(SplashScreen, self).__init__(bitmap,
wx.SPLASH_CENTRE_ON_SCREEN|wx.SPLASH_TIMEOUT, 4000, None, -1)
| gpl-3.0 | -2,813,246,547,384,950,300 | 37.386364 | 77 | 0.704559 | false |
Trax-air/swagger-stub | setup.py | 1 | 1752 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def parse_requirements(filename):
""" load requirements from a pip requirements file """
lineiter = (line.strip() for line in open(filename))
return [line for line in lineiter if line and not line.startswith("#")]
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = parse_requirements('requirements.txt')
test_requirements = parse_requirements('requirements_dev.txt')
setup(
name='swagger_stub',
version='0.2.1',
description="Generate a stub from a swagger file",
long_description=readme + '\n\n' + history,
author="Cyprien Guillemot",
author_email='[email protected]',
url='https://github.com/Trax-air/swagger-stub',
packages=[
'swagger_stub',
],
package_dir={'swagger_stub':
'swagger_stub'},
include_package_data=True,
setup_requires=['pytest-runner'],
install_requires=requirements,
license="MIT",
zip_safe=False,
keywords='swagger, stub, API, REST, swagger-stub',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
tests_require=test_requirements
)
| mit | 6,996,824,572,063,562,000 | 30.854545 | 75 | 0.642694 | false |
caneruguz/osf.io | api/base/settings/defaults.py | 1 | 8833 | """
Django settings for api project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
from urlparse import urlparse
from website import settings as osf_settings
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
DATABASES = {
'default': {
'CONN_MAX_AGE': 0,
'ENGINE': 'osf.db.backends.postgresql', # django.db.backends.postgresql
'NAME': os.environ.get('OSF_DB_NAME', 'osf'),
'USER': os.environ.get('OSF_DB_USER', 'postgres'),
'PASSWORD': os.environ.get('OSF_DB_PASSWORD', ''),
'HOST': os.environ.get('OSF_DB_HOST', '127.0.0.1'),
'PORT': os.environ.get('OSF_DB_PORT', '5432'),
'ATOMIC_REQUESTS': True,
}
}
DATABASE_ROUTERS = ['osf.db.router.PostgreSQLFailoverRouter', ]
CELERY_IMPORTS = [
'osf.management.commands.migratedata',
'osf.management.commands.migraterelations',
'osf.management.commands.verify',
]
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
AUTH_USER_MODEL = 'osf.OSFUser'
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = osf_settings.SECRET_KEY
AUTHENTICATION_BACKENDS = (
'api.base.authentication.backends.ODMBackend',
)
# SECURITY WARNING: don't run with debug turned on in production!
DEV_MODE = osf_settings.DEV_MODE
DEBUG = osf_settings.DEBUG_MODE
DEBUG_PROPAGATE_EXCEPTIONS = True
# session:
SESSION_COOKIE_NAME = 'api'
SESSION_COOKIE_SECURE = osf_settings.SECURE_MODE
SESSION_COOKIE_HTTPONLY = osf_settings.SESSION_COOKIE_HTTPONLY
# csrf:
CSRF_COOKIE_NAME = 'api-csrf'
CSRF_COOKIE_SECURE = osf_settings.SECURE_MODE
CSRF_COOKIE_HTTPONLY = osf_settings.SECURE_MODE
ALLOWED_HOSTS = [
'.osf.io'
]
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.admin',
# 3rd party
'rest_framework',
'rest_framework_swagger',
'corsheaders',
'raven.contrib.django.raven_compat',
'django_extensions',
# OSF
'osf',
# Addons
'addons.osfstorage',
'addons.bitbucket',
'addons.box',
'addons.dataverse',
'addons.dropbox',
'addons.figshare',
'addons.forward',
'addons.github',
'addons.googledrive',
'addons.mendeley',
'addons.owncloud',
'addons.s3',
'addons.twofactor',
'addons.wiki',
'addons.zotero',
)
# local development using https
if osf_settings.SECURE_MODE and DEBUG:
INSTALLED_APPS += ('sslserver',)
# TODO: Are there more granular ways to configure reporting specifically related to the API?
RAVEN_CONFIG = {
'tags': {'App': 'api'},
'dsn': osf_settings.SENTRY_DSN,
'release': osf_settings.VERSION,
}
BULK_SETTINGS = {
'DEFAULT_BULK_LIMIT': 100
}
MAX_PAGE_SIZE = 100
REST_FRAMEWORK = {
'PAGE_SIZE': 10,
# Order is important here because of a bug in rest_framework_swagger. For now,
# rest_framework.renderers.JSONRenderer needs to be first, at least until
# https://github.com/marcgibbons/django-rest-swagger/issues/271 is resolved.
'DEFAULT_RENDERER_CLASSES': (
'api.base.renderers.JSONAPIRenderer',
'api.base.renderers.JSONRendererWithESISupport',
'api.base.renderers.BrowsableAPIRendererNoForms',
),
'DEFAULT_PARSER_CLASSES': (
'api.base.parsers.JSONAPIParser',
'api.base.parsers.JSONAPIParserForRegularJSON',
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser'
),
'EXCEPTION_HANDLER': 'api.base.exceptions.json_api_exception_handler',
'DEFAULT_CONTENT_NEGOTIATION_CLASS': 'api.base.content_negotiation.JSONAPIContentNegotiation',
'DEFAULT_VERSIONING_CLASS': 'api.base.versioning.BaseVersioning',
'DEFAULT_VERSION': '2.0',
'ALLOWED_VERSIONS': (
'2.0',
'2.1',
'2.2',
'2.3',
'2.4',
'2.5',
'2.6',
),
'DEFAULT_FILTER_BACKENDS': ('api.base.filters.ODMOrderingFilter',),
'DEFAULT_PAGINATION_CLASS': 'api.base.pagination.JSONAPIPagination',
'ORDERING_PARAM': 'sort',
'DEFAULT_AUTHENTICATION_CLASSES': (
# Custom auth classes
'api.base.authentication.drf.OSFBasicAuthentication',
'api.base.authentication.drf.OSFSessionAuthentication',
'api.base.authentication.drf.OSFCASAuthentication'
),
'DEFAULT_THROTTLE_CLASSES': (
'rest_framework.throttling.UserRateThrottle',
'api.base.throttling.NonCookieAuthThrottle',
),
'DEFAULT_THROTTLE_RATES': {
'user': '10000/day',
'non-cookie-auth': '100/hour',
'add-contributor': '10/second',
'create-guid': '1000/hour',
'root-anon-throttle': '1000/hour',
'test-user': '2/hour',
'test-anon': '1/hour',
}
}
# Settings related to CORS Headers addon: allow API to receive authenticated requests from OSF
# CORS plugin only matches based on "netloc" part of URL, so as workaround we add that to the list
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = (urlparse(osf_settings.DOMAIN).netloc,
osf_settings.DOMAIN,
)
# This needs to remain True to allow cross origin requests that are in CORS_ORIGIN_WHITELIST to
# use cookies.
CORS_ALLOW_CREDENTIALS = True
# Set dynamically on app init
ORIGINS_WHITELIST = ()
MIDDLEWARE_CLASSES = (
'api.base.middleware.DjangoGlobalMiddleware',
'api.base.middleware.CeleryTaskMiddleware',
'api.base.middleware.PostcommitTaskMiddleware',
# A profiling middleware. ONLY FOR DEV USE
# Uncomment and add "prof" to url params to recieve a profile for that url
# 'api.base.middleware.ProfileMiddleware',
# 'django.contrib.sessions.middleware.SessionMiddleware',
'api.base.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
# 'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True
}]
ROOT_URLCONF = 'api.base.urls'
WSGI_APPLICATION = 'api.base.wsgi.application'
LANGUAGE_CODE = 'en-us'
# Disabled to make a test work (TestNodeLog.test_formatted_date)
# TODO Try to understand what's happening to cause the test to break when that line is active.
# TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static/vendor')
API_BASE = 'v2/'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
('rest_framework_swagger/css', os.path.join(BASE_DIR, 'static/css')),
('rest_framework_swagger/images', os.path.join(BASE_DIR, 'static/images')),
)
# TODO: Revisit methods for excluding private routes from swagger docs
SWAGGER_SETTINGS = {
'api_path': '/',
'info': {
'description':
"""
Welcome to the fine documentation for the Open Science Framework's API! Please click
on the <strong>GET /v2/</strong> link below to get started.
For the most recent docs, please check out our <a href="/v2/">Browsable API</a>.
""",
'title': 'OSF APIv2 Documentation',
},
'doc_expansion': 'list',
'exclude_namespaces': ['applications', 'tokens', 'test'],
}
NODE_CATEGORY_MAP = osf_settings.NODE_CATEGORY_MAP
DEBUG_TRANSACTIONS = DEBUG
JWT_SECRET = 'osf_api_cas_login_jwt_secret_32b'
JWE_SECRET = 'osf_api_cas_login_jwe_secret_32b'
ENABLE_VARNISH = osf_settings.ENABLE_VARNISH
ENABLE_ESI = osf_settings.ENABLE_ESI
VARNISH_SERVERS = osf_settings.VARNISH_SERVERS
ESI_MEDIA_TYPES = osf_settings.ESI_MEDIA_TYPES
ADDONS_FOLDER_CONFIGURABLE = ['box', 'dropbox', 's3', 'googledrive', 'figshare', 'owncloud']
ADDONS_OAUTH = ADDONS_FOLDER_CONFIGURABLE + ['dataverse', 'github', 'bitbucket', 'mendeley', 'zotero', 'forward']
BYPASS_THROTTLE_TOKEN = 'test-token'
OSF_SHELL_USER_IMPORTS = None
# Settings for use in the admin
OSF_URL = 'https://osf.io'
| apache-2.0 | 4,021,562,499,466,621,400 | 29.458621 | 113 | 0.681535 | false |
mozilla/stoneridge | wpr/httparchive.py | 1 | 25649 | #!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""View and edit HTTP Archives.
To list all URLs in an archive:
$ ./httparchive.py ls archive.wpr
To view the content of all URLs from example.com:
$ ./httparchive.py cat --host example.com archive.wpr
To view the content of a particular URL:
$ ./httparchive.py cat --host www.example.com --path /foo archive.wpr
To view the content of all URLs:
$ ./httparchive.py cat archive.wpr
To edit a particular URL:
$ ./httparchive.py edit --host www.example.com --path /foo archive.wpr
"""
import difflib
import email.utils
import httplib
import httpzlib
import json
import logging
import optparse
import os
import persistentmixin
import StringIO
import subprocess
import sys
import tempfile
import urlparse
import platformsettings
class HttpArchiveException(Exception):
"""Base class for all exceptions in httparchive."""
pass
class HttpArchive(dict, persistentmixin.PersistentMixin):
"""Dict with ArchivedHttpRequest keys and ArchivedHttpResponse values.
PersistentMixin adds CreateNew(filename), Load(filename), and Persist().
Attributes:
server_rtt: dict of {hostname, server rtt in milliseconds}
"""
def __init__(self):
self.server_rtt = {}
def get_server_rtt(self, server):
"""Retrieves the round trip time (rtt) to the server
Args:
server: the hostname of the server
Returns:
round trip time to the server in seconds, or 0 if unavailable
"""
if server not in self.server_rtt:
platform_settings = platformsettings.get_platform_settings()
self.server_rtt[server] = platform_settings.ping(server)
return self.server_rtt[server]
def get(self, request, default=None):
"""Return the archived response for a given request.
Does extra checking for handling some HTTP request headers.
Args:
request: instance of ArchivedHttpRequest
default: default value to return if request is not found
Returns:
Instance of ArchivedHttpResponse or default if no matching
response is found
"""
if request in self:
return self[request]
return self.get_conditional_response(request, default)
def get_conditional_response(self, request, default):
"""Get the response based on the conditional HTTP request headers.
Args:
request: an ArchivedHttpRequest representing the original request.
      default: the default ArchivedHttpResponse to return if no archived
          response matches the request once its conditional headers are
          removed.
Returns:
      an ArchivedHttpResponse with a status of 200, 304 (not modified), or
412 (precondition failed)
"""
response = default
if request.is_conditional():
stripped_request = request.create_request_without_conditions()
if stripped_request in self:
response = self[stripped_request]
if response.status == 200:
status = self.get_conditional_status(request, response)
if status != 200:
response = create_response(status)
return response
def get_conditional_status(self, request, response):
status = 200
last_modified = email.utils.parsedate(
response.get_header_case_insensitive('last-modified'))
response_etag = response.get_header_case_insensitive('etag')
is_get_or_head = request.command.upper() in ('GET', 'HEAD')
match_value = request.headers.get('if-match', None)
if match_value:
if self.is_etag_match(match_value, response_etag):
status = 200
else:
status = 412 # precondition failed
none_match_value = request.headers.get('if-none-match', None)
if none_match_value:
if self.is_etag_match(none_match_value, response_etag):
status = 304
elif is_get_or_head:
status = 200
else:
status = 412
if is_get_or_head and last_modified:
for header in ('if-modified-since', 'if-unmodified-since'):
date = email.utils.parsedate(request.headers.get(header, None))
if date:
if ((header == 'if-modified-since' and last_modified > date) or
(header == 'if-unmodified-since' and last_modified < date)):
if status != 412:
status = 200
else:
status = 304 # not modified
return status
def is_etag_match(self, request_etag, response_etag):
"""Determines whether the entity tags of the request/response matches.
Args:
request_etag: the value string of the "if-(none)-match:"
portion of the request header
response_etag: the etag value of the response
Returns:
True on match, False otherwise
"""
response_etag = response_etag.strip('" ')
for etag in request_etag.split(','):
etag = etag.strip('" ')
if etag in ('*', response_etag):
return True
return False
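  # A worked example of the rule above (the header values are illustrative):
  # a request header of if-none-match: "abc", "def" is split on commas and
  # stripped of quotes and spaces, so it matches a stored response etag of
  # "abc"; a bare * matches any stored etag.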
def get_requests(self, command=None, host=None, path=None, use_query=True):
"""Return a list of requests that match the given args."""
return [r for r in self if r.matches(command, host, path,
use_query=use_query)]
def ls(self, command=None, host=None, path=None):
"""List all URLs that match given params."""
return ''.join(sorted(
'%s\n' % r for r in self.get_requests(command, host, path)))
def cat(self, command=None, host=None, path=None):
"""Print the contents of all URLs that match given params."""
out = StringIO.StringIO()
for request in self.get_requests(command, host, path):
print >>out, str(request)
print >>out, 'Untrimmed request headers:'
for k in request.headers:
print >>out, ' %s: %s' % (k, request.headers[k])
if request.request_body:
print >>out, request.request_body
print >>out, '---- Response Info', '-' * 51
response = self[request]
chunk_lengths = [len(x) for x in response.response_data]
print >>out, ('Status: %s\n'
'Reason: %s\n'
'Headers delay: %s\n'
'Response headers:') % (
response.status, response.reason, response.delays['headers'])
for k, v in response.headers:
print >>out, ' %s: %s' % (k, v)
print >>out, ('Chunk count: %s\n'
'Chunk lengths: %s\n'
'Chunk delays: %s') % (
len(chunk_lengths), chunk_lengths, response.delays['data'])
body = response.get_data_as_text()
print >>out, '---- Response Data', '-' * 51
if body:
print >>out, body
else:
print >>out, '[binary data]'
print >>out, '=' * 70
return out.getvalue()
def edit(self, command=None, host=None, path=None):
"""Edits the single request which matches given params."""
editor = os.getenv('EDITOR')
if not editor:
print 'You must set the EDITOR environmental variable.'
return
matching_requests = self.get_requests(command, host, path)
if not matching_requests:
print 'Failed to find any requests matching given command, host, path.'
return
if len(matching_requests) > 1:
print 'Found multiple matching requests. Please refine.'
print self.ls(command, host, path)
response = self[matching_requests[0]]
tmp_file = tempfile.NamedTemporaryFile(delete=False)
tmp_file.write(response.get_response_as_text())
tmp_file.close()
subprocess.check_call([editor, tmp_file.name])
response.set_response_from_text(''.join(open(tmp_file.name).readlines()))
os.remove(tmp_file.name)
def _format_request_lines(self, req):
"""Format request to make diffs easier to read.
Args:
req: an ArchivedHttpRequest
Returns:
Example:
['GET www.example.com/path\n', 'Header-Key: header value\n', ...]
"""
parts = ['%s %s%s\n' % (req.command, req.host, req.path)]
if req.request_body:
parts.append('%s\n' % req.request_body)
for k, v in req.trimmed_headers:
k = '-'.join(x.capitalize() for x in k.split('-'))
parts.append('%s: %s\n' % (k, v))
return parts
def find_closest_request(self, request, use_path=False):
"""Find the closest matching request in the archive to the given request.
Args:
request: an ArchivedHttpRequest
use_path: If True, closest matching request's path component must match.
(Note: this refers to the 'path' component within the URL, not the
query string component.)
If use_path=False, candidate will NOT match in example below
e.g. request = GET www.test.com/path?aaa
candidate = GET www.test.com/diffpath?aaa
Returns:
If a close match is found, return the instance of ArchivedHttpRequest.
Otherwise, return None.
"""
best_match = None
request_lines = self._format_request_lines(request)
matcher = difflib.SequenceMatcher(b=''.join(request_lines))
path = None
if use_path:
path = request.path
for candidate in self.get_requests(request.command, request.host, path,
use_query=not use_path):
candidate_lines = self._format_request_lines(candidate)
matcher.set_seq1(''.join(candidate_lines))
best_match = max(best_match, (matcher.ratio(), candidate))
if best_match:
return best_match[1]
return None
def diff(self, request):
"""Diff the given request to the closest matching request in the archive.
Args:
request: an ArchivedHttpRequest
Returns:
If a close match is found, return a textual diff between the requests.
Otherwise, return None.
"""
request_lines = self._format_request_lines(request)
closest_request = self.find_closest_request(request)
if closest_request:
closest_request_lines = self._format_request_lines(closest_request)
return ''.join(difflib.ndiff(closest_request_lines, request_lines))
return None
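# A minimal programmatic sketch of the class above (the archive path, host
# and request below are hypothetical):
#
#   archive = HttpArchive.Load('/tmp/archive.wpr')
#   print archive.ls(command='GET', host='www.example.com')
#   closest = archive.find_closest_request(some_request, use_path=True)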
class ArchivedHttpRequest(object):
"""Record all the state that goes into a request.
ArchivedHttpRequest instances are considered immutable so they can
serve as keys for HttpArchive instances.
(The immutability is not enforced.)
Upon creation, the headers are "trimmed" (i.e. edited or dropped)
and saved to self.trimmed_headers to allow requests to match in a wider
variety of playback situations (e.g. using different user agents).
For unpickling, 'trimmed_headers' is recreated from 'headers'. That
allows for changes to the trim function and can help with debugging.
"""
CONDITIONAL_HEADERS = [
'if-none-match', 'if-match',
'if-modified-since', 'if-unmodified-since']
def __init__(self, command, host, path, request_body, headers, is_ssl=False):
"""Initialize an ArchivedHttpRequest.
Args:
command: a string (e.g. 'GET' or 'POST').
host: a host name (e.g. 'www.google.com').
path: a request path (e.g. '/search?q=dogs').
request_body: a request body string for a POST or None.
headers: {key: value, ...} where key and value are strings.
is_ssl: a boolean which is True iff request is make via SSL.
"""
self.command = command
self.host = host
self.path = path
self.request_body = request_body
self.headers = headers
self.is_ssl = is_ssl
self.trimmed_headers = self._TrimHeaders(headers)
def __str__(self):
scheme = 'https' if self.is_ssl else 'http'
return '%s %s://%s%s %s' % (
self.command, scheme, self.host, self.path, self.trimmed_headers)
def __repr__(self):
return repr((self.command, self.host, self.path, self.request_body,
self.trimmed_headers, self.is_ssl))
def __hash__(self):
"""Return a integer hash to use for hashed collections including dict."""
return hash(repr(self))
def __eq__(self, other):
"""Define the __eq__ method to match the hash behavior."""
return repr(self) == repr(other)
def __setstate__(self, state):
"""Influence how to unpickle.
"headers" are the original request headers.
"trimmed_headers" are the trimmed headers used for matching requests
during replay.
Args:
state: a dictionary for __dict__
"""
if 'full_headers' in state:
# Fix older version of archive.
state['headers'] = state['full_headers']
del state['full_headers']
if 'headers' not in state:
raise HttpArchiveException(
'Archived HTTP request is missing "headers". The HTTP archive is'
' likely from a previous version and must be re-recorded.')
state['trimmed_headers'] = self._TrimHeaders(dict(state['headers']))
if 'is_ssl' not in state:
state['is_ssl'] = False
self.__dict__.update(state)
def __getstate__(self):
"""Influence how to pickle.
Returns:
a dict to use for pickling
"""
state = self.__dict__.copy()
del state['trimmed_headers']
return state
def matches(self, command=None, host=None, path_with_query=None,
use_query=True):
"""Returns true iff the request matches all parameters.
Args:
command: a string (e.g. 'GET' or 'POST').
host: a host name (e.g. 'www.google.com').
path_with_query: a request path with query string (e.g. '/search?q=dogs')
use_query:
If use_query is True, request matching uses both the hierarchical path
and query string component.
If use_query is False, request matching only uses the hierarchical path
e.g. req1 = GET www.test.com/index?aaaa
req2 = GET www.test.com/index?bbbb
If use_query is True, req1.matches(req2) evaluates to False
If use_query is False, req1.matches(req2) evaluates to True
Returns:
True iff the request matches all parameters
"""
path_match = path_with_query == self.path
if not use_query:
self_path = urlparse.urlparse('http://%s%s' % (
self.host or '', self.path or '')).path
other_path = urlparse.urlparse('http://%s%s' % (
host or '', path_with_query or '')).path
path_match = self_path == other_path
return ((command is None or command == self.command) and
(host is None or host == self.host) and
(path_with_query is None or path_match))
@classmethod
def _TrimHeaders(cls, headers):
"""Removes headers that are known to cause problems during replay.
These headers are removed for the following reasons:
- accept: Causes problems with www.bing.com. During record, CSS is fetched
with *. During replay, it's text/css.
- accept-charset, accept-language, referer: vary between clients.
- connection, method, scheme, url, version: Cause problems with spdy.
- cookie: Extremely sensitive to request/response order.
- keep-alive: Not supported by Web Page Replay.
- user-agent: Changes with every Chrome version.
- proxy-connection: Sent for proxy requests.
Another variant to consider is dropping only the value from the header.
However, this is particularly bad for the cookie header, because the
presence of the cookie depends on the responses we've seen when the request
is made.
Args:
headers: {header_key: header_value, ...}
Returns:
[(header_key, header_value), ...] # (with undesirable headers removed)
"""
# TODO(tonyg): Strip sdch from the request headers because we can't
# guarantee that the dictionary will be recorded, so replay may not work.
if 'accept-encoding' in headers:
headers['accept-encoding'] = headers['accept-encoding'].replace(
'sdch', '')
# A little clean-up
if headers['accept-encoding'].endswith(','):
headers['accept-encoding'] = headers['accept-encoding'][:-1]
undesirable_keys = [
'accept', 'accept-charset', 'accept-language',
'connection', 'cookie', 'keep-alive', 'method',
'referer', 'scheme', 'url', 'version', 'user-agent', 'proxy-connection']
return sorted([(k, v) for k, v in headers.items()
if k.lower() not in undesirable_keys])
def is_conditional(self):
"""Return list of headers that match conditional headers."""
for header in self.CONDITIONAL_HEADERS:
if header in self.headers:
return True
return False
def create_request_without_conditions(self):
stripped_headers = dict((k, v) for k, v in self.headers.iteritems()
if k.lower() not in self.CONDITIONAL_HEADERS)
return ArchivedHttpRequest(
self.command, self.host, self.path, self.request_body,
stripped_headers, self.is_ssl)
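# Building a request key by hand (the host, path and headers below are
# illustrative):
#
#   request = ArchivedHttpRequest(
#       'GET', 'www.example.com', '/index.html', None,
#       {'accept-encoding': 'gzip,deflate'}, is_ssl=False)
#   response = http_archive.get(request)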
class ArchivedHttpResponse(object):
"""All the data needed to recreate all HTTP response."""
# CHUNK_EDIT_SEPARATOR is used to edit and view text content.
# It is not sent in responses. It is added by get_data_as_text()
# and removed by set_data().
CHUNK_EDIT_SEPARATOR = '[WEB_PAGE_REPLAY_CHUNK_BOUNDARY]'
# DELAY_EDIT_SEPARATOR is used to edit and view server delays.
DELAY_EDIT_SEPARATOR = ('\n[WEB_PAGE_REPLAY_EDIT_ARCHIVE --- '
'Delays are above. Response content is below.]\n')
def __init__(self, version, status, reason, headers, response_data,
delays=None):
"""Initialize an ArchivedHttpResponse.
Args:
version: HTTP protocol version used by server.
10 for HTTP/1.0, 11 for HTTP/1.1 (same as httplib).
status: Status code returned by server (e.g. 200).
reason: Reason phrase returned by server (e.g. "OK").
headers: list of (header, value) tuples.
response_data: list of content chunks.
Concatenating the chunks gives the complete contents
(i.e. the chunks do not have any lengths or delimiters).
Do not include the final, zero-length chunk that marks the end.
delays: dict of (ms) delays before "headers" and "data". For example,
{'headers': 50, 'data': [0, 10, 10]}
"""
self.version = version
self.status = status
self.reason = reason
self.headers = headers
self.response_data = response_data
self.delays = delays
self.fix_delays()
def fix_delays(self):
"""Initialize delays, or check the number of data delays."""
expected_num_delays = len(self.response_data)
if not self.delays:
self.delays = {
'headers': 0,
'data': [0] * expected_num_delays
}
else:
num_delays = len(self.delays['data'])
if num_delays != expected_num_delays:
raise HttpArchiveException(
'Server delay length mismatch: %d (expected %d): %s',
num_delays, expected_num_delays, self.delays['data'])
def __repr__(self):
return repr((self.version, self.status, self.reason, sorted(self.headers),
self.response_data))
def __hash__(self):
"""Return a integer hash to use for hashed collections including dict."""
return hash(repr(self))
def __eq__(self, other):
"""Define the __eq__ method to match the hash behavior."""
return repr(self) == repr(other)
def __setstate__(self, state):
"""Influence how to unpickle.
Args:
state: a dictionary for __dict__
"""
if 'server_delays' in state:
state['delays'] = {
'headers': 0,
'data': state['server_delays']
}
del state['server_delays']
elif 'delays' not in state:
state['delays'] = None
self.__dict__.update(state)
self.fix_delays()
def get_header(self, key, default=None):
for k, v in self.headers:
if key == k:
return v
return default
def get_header_case_insensitive(self, key):
for k, v in self.headers:
if key.lower() == k.lower():
return v
return None
def set_header(self, key, value):
for i, (k, v) in enumerate(self.headers):
if key == k:
self.headers[i] = (key, value)
return
self.headers.append((key, value))
def remove_header(self, key):
for i, (k, v) in enumerate(self.headers):
if key == k:
self.headers.pop(i)
return
def is_gzip(self):
return self.get_header('content-encoding') == 'gzip'
def is_compressed(self):
return self.get_header('content-encoding') in ('gzip', 'deflate')
def is_chunked(self):
return self.get_header('transfer-encoding') == 'chunked'
def get_data_as_text(self):
"""Return content as a single string.
Uncompresses and concatenates chunks with CHUNK_EDIT_SEPARATOR.
"""
content_type = self.get_header('content-type')
if (not content_type or
not (content_type.startswith('text/') or
content_type == 'application/x-javascript')):
return None
if self.is_compressed():
uncompressed_chunks = httpzlib.uncompress_chunks(
self.response_data, self.is_gzip())
else:
uncompressed_chunks = self.response_data
return self.CHUNK_EDIT_SEPARATOR.join(uncompressed_chunks)
def get_delays_as_text(self):
"""Return delays as editable text."""
return json.dumps(self.delays, indent=2)
def get_response_as_text(self):
"""Returns response content as a single string.
    Server delays are separated on a per-chunk basis. Delays are in milliseconds.
Response content begins after DELAY_EDIT_SEPARATOR
"""
data = self.get_data_as_text()
if data is None:
logging.warning('Data can not be represented as text.')
data = ''
delays = self.get_delays_as_text()
return self.DELAY_EDIT_SEPARATOR.join((delays, data))
def set_data(self, text):
"""Inverse of get_data_as_text().
Split on CHUNK_EDIT_SEPARATOR and compress if needed.
"""
text_chunks = text.split(self.CHUNK_EDIT_SEPARATOR)
if self.is_compressed():
self.response_data = httpzlib.compress_chunks(text_chunks, self.is_gzip())
else:
self.response_data = text_chunks
if not self.is_chunked():
content_length = sum(len(c) for c in self.response_data)
self.set_header('content-length', str(content_length))
def set_delays(self, delays_text):
"""Inverse of get_delays_as_text().
Args:
delays_text: JSON encoded text such as the following:
{
headers: 80,
data: [6, 55, 0]
}
Times are in milliseconds.
Each data delay corresponds with one response_data value.
"""
try:
self.delays = json.loads(delays_text)
except (ValueError, KeyError) as e:
logging.critical('Unable to parse delays %s: %s', delays_text, e)
self.fix_delays()
def set_response_from_text(self, text):
"""Inverse of get_response_as_text().
Modifies the state of the archive according to the textual representation.
"""
try:
delays, data = text.split(self.DELAY_EDIT_SEPARATOR)
except ValueError:
logging.critical(
'Error parsing text representation. Skipping edits.')
return
self.set_delays(delays)
self.set_data(data)
def create_response(status, reason=None, headers=None, body=None):
"""Convenience method for creating simple ArchivedHttpResponse objects."""
if reason is None:
reason = httplib.responses.get(status, 'Unknown')
if headers is None:
headers = [('content-type', 'text/plain')]
if body is None:
body = "%s %s" % (status, reason)
return ArchivedHttpResponse(11, status, reason, headers, [body])
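# For example, create_response(304) yields an ArchivedHttpResponse with
# status 304, reason 'Not Modified' (looked up in httplib.responses), a
# single ('content-type', 'text/plain') header and the body '304 Not Modified'.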
def main():
class PlainHelpFormatter(optparse.IndentedHelpFormatter):
def format_description(self, description):
if description:
return description + '\n'
else:
return ''
option_parser = optparse.OptionParser(
usage='%prog [ls|cat|edit] [options] replay_file',
formatter=PlainHelpFormatter(),
description=__doc__,
epilog='http://code.google.com/p/web-page-replay/')
option_parser.add_option('-c', '--command', default=None,
action='store',
type='string',
help='Only show URLs matching this command.')
option_parser.add_option('-o', '--host', default=None,
action='store',
type='string',
help='Only show URLs matching this host.')
option_parser.add_option('-p', '--path', default=None,
action='store',
type='string',
help='Only show URLs matching this path.')
options, args = option_parser.parse_args()
if len(args) != 2:
print 'args: %s' % args
option_parser.error('Must specify a command and replay_file')
command = args[0]
replay_file = args[1]
if not os.path.exists(replay_file):
option_parser.error('Replay file "%s" does not exist' % replay_file)
http_archive = HttpArchive.Load(replay_file)
if command == 'ls':
print http_archive.ls(options.command, options.host, options.path)
elif command == 'cat':
print http_archive.cat(options.command, options.host, options.path)
elif command == 'edit':
http_archive.edit(options.command, options.host, options.path)
http_archive.Persist(replay_file)
else:
option_parser.error('Unknown command "%s"' % command)
return 0
if __name__ == '__main__':
sys.exit(main())
| mpl-2.0 | 3,875,306,886,947,456,500 | 33.707713 | 80 | 0.644859 | false |
CLVsol/odoo_addons | clv_medicament_list/item/__init__.py | 1 | 1436 | # -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
import clv_medicament_list_item
| agpl-3.0 | -4,137,553,413,951,315,500 | 70.8 | 80 | 0.410864 | false |
georgistanev/django-dash | src/dash/contrib/plugins/rss_feed/dash_plugins.py | 1 | 1179 | __author__ = 'Artur Barseghyan <[email protected]>'
__copyright__ = 'Copyright (c) 2013 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('BaseReadRSSFeedPlugin',)
from django.utils.translation import ugettext_lazy as _
from dash.base import BaseDashboardPlugin
from dash.factory import plugin_factory
from dash.contrib.plugins.rss_feed.forms import ReadRSSFeedForm
# ********************************************************************************
# ********************************* Base Read RSS feed plugin ********************
# ********************************************************************************
class BaseReadRSSFeedPlugin(BaseDashboardPlugin):
"""
Base Read RSS feed into HTML plugin.
"""
name = _("Read RSS feed")
form = ReadRSSFeedForm
group = _("Internet")
# ********************************************************************************
# ********** Generating and registering the plugins using factory ****************
# ********************************************************************************
sizes = (
(2, 3),
(3, 3),
)
plugin_factory(BaseReadRSSFeedPlugin, 'read_rss_feed', sizes)
| gpl-2.0 | 3,955,246,857,589,229,600 | 35.84375 | 82 | 0.46056 | false |
cleobulo/site-mapp2 | crawl/linkanalyzer.py | 1 | 4873 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#author Cleóbulo Bezerra < [email protected] >
#file Defines a set of procedures to verify the authenticity of URLs
from re import match, split, sub, search
from Queue import Queue
from urlparse import urljoin, urlparse
def init():
"""
    Initializes the global variables required to build a link parser.
    The goal of this parser is to verify that a URL is authentic:
    1 - Verify the URL syntax and classify URLs as relative or absolute;
    2 - If possible, authenticate the URLs considered valid;
    3 - Filter URLs, that is, keep only those that lead to HTML pages.
"""
#Reserved Characters
global GEN_DELIMS
global SUB_DELIMS
global RESERVED
GEN_DELIMS = r"[/:\?#\[\]@]"
SUB_DELIMS = r"[!\$&\+\*,;`\(\)]"
RESERVED = GEN_DELIMS + "|" + SUB_DELIMS
#Unreserved Characters
global UNRESERVED
UNRESERVED = r"[\w\.-_~]"
#PATH (sub) components
global PCHAR
global SEGMENT
global SEGMENT_NZ
global SEGMENT_NZ_NC
PCHAR = r"(" + UNRESERVED + "|" + SUB_DELIMS + "|:|@)"
SEGMENT = PCHAR + "*"
SEGMENT_NZ = PCHAR + "+"
SEGMENT_NZ_NC = r"(" + UNRESERVED + "|" + SUB_DELIMS + "|@)+"
global PATH_ABEMPTY
global PATH_ABSOLUTE
global PATH_NOSCHEME
global PATH_ROOTLESS
PATH_ABEMPTY = r"(/" + SEGMENT + ")*"
PATH_ABSOLUTE = r"/(" + SEGMENT_NZ + "(/" + SEGMENT + ")*)"
PATH_NOSCHEME = SEGMENT_NZ_NC + "(/" + SEGMENT + ")*"
PATH_ROOTLESS = SEGMENT_NZ + "(/" + SEGMENT + ")*"
#The three main components of the syntactic structure of a URL.
global SCHEME
global HOST
global PATH
SCHEME = r"http(s)?:"
HOST = r"//(" + UNRESERVED + "|" + SUB_DELIMS + ")*"
PATH = r"(" + PATH_ABEMPTY + "|" + PATH_ABSOLUTE + "|" + PATH_NOSCHEME + "|" + PATH_ROOTLESS + "|)"
class _Token:
"""
This class represents each component of a URL syntax.
"""
def __init__(self, token):
self.__token = token
@property
def token(self):
return self.__token
class _UrlSyntaxTree:
"""
    Represents a URL syntax tree for URL analysis. The goal is to try to
    correct or authenticate the URLs collected on the Web by some program
    or entered by a user.
"""
def __init__(self):
self.__leftchild = None
self.__middlechild = None
self.__rightchild = None
def __urlsplit(self, url):
return split('([^\?#:/]+:|//[^\?#/]*)', url)
def build_absolute_url(self, url):
urlcomp = self.__urlsplit(url)
queuecomp = Queue()
for comp in urlcomp:
if comp != '':
queuecomp.put(_Token(comp))
while not queuecomp.empty():
currcomp = queuecomp.get()
if match(SCHEME, currcomp.token):
self.__leftchild = currcomp
elif match(HOST, currcomp.token):
self.__middlechild = currcomp
elif match(PATH, currcomp.token):
self.build_relative_url(currcomp.token)
def get_absolute_url(self):
if self.__leftchild != None and self.__middlechild != None and self.__rightchild != None:
return self.__leftchild.token + self.__middlechild.token + self.__rightchild.token
elif self.__leftchild != None and self.__middlechild != None:
return self.__leftchild.token + sub('(/|)$', '/', self.__middlechild.token)
else:
return None
def build_relative_url(self, path):
urlcomp = _Token(path)
if match(PATH, urlcomp.token):
self.__rightchild = urlcomp
def get_relative_url(self):
if self.get_absolute_url() == None and self.__rightchild != None:
return self.__rightchild.token
else:
return None
class LinkAnalyzer:
"""
Represents an object for URL analysis. This object seeks to
perform the syntax analysis and the filtering of these URLs.
"""
def __init__(self):
init()
def urleval(self, curl, furl = '/'):
self.__urltree = _UrlSyntaxTree()
url = curl if furl == '/' else furl
if match(SCHEME + HOST, url):
self.__urltree.build_absolute_url(url)
return self.__urltree.get_absolute_url()
elif match(PATH, url):
self.__urltree.build_relative_url(url)
return urljoin(curl, self.__urltree.get_relative_url())
return None
def urlfilter(self, url):
if search('^http(s)?://[^\?#/]+$', url):
return True
elif search('(/[^\?#\.:/]*)$', url):
return True
elif search('(\.html|\.htm|\.php|\.asp|\.jsp)$', url):
return True
else:
return False
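# A minimal usage sketch (the URLs below are illustrative only):
if __name__ == '__main__':
    analyzer = LinkAnalyzer()
    absolute = analyzer.urleval('http://example.com/', '/docs/index.html')
    if absolute and analyzer.urlfilter(absolute):
        print absolute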
| mit | 4,591,837,217,795,273,000 | 27.828402 | 103 | 0.562192 | false |
nojhan/weboob-devel | modules/cuisineaz/pages.py | 1 | 4451 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.recipe import Recipe, Comment
from weboob.capabilities.base import NotAvailable
from weboob.browser.pages import HTMLPage, pagination
from weboob.browser.elements import ItemElement, method, ListElement
from weboob.browser.filters.standard import CleanText, Regexp, Env, Time
from weboob.browser.filters.html import XPath, CleanHTML
import re
import datetime
class CuisineazDuration(Time):
klass = datetime.timedelta
_regexp = re.compile(r'((?P<hh>\d+) h)?((?P<mm>\d+) min)?(?P<ss>\d+)?')
kwargs = {'hours': 'hh', 'minutes': 'mm', 'seconds': 'ss'}
class ResultsPage(HTMLPage):
""" Page which contains results as a list of recipies
"""
@pagination
@method
class iter_recipes(ListElement):
item_xpath = '//div[@id="divRecette"]'
def next_page(self):
next = CleanText('//li[@class="next"]/span/a/@href',
default=None)(self)
if next:
return next
class item(ItemElement):
klass = Recipe
def condition(self):
return Regexp(CleanText('./div[has-class("searchTitle")]/h2/a/@href'),
'/recettes/(.*).aspx',
default=None)(self.el)
obj_id = Regexp(CleanText('./div[has-class("searchTitle")]/h2/a/@href'),
'/recettes/(.*).aspx')
obj_title = CleanText('./div[has-class("searchTitle")]/h2/a')
obj_thumbnail_url = CleanText('./div[has-class("searchImg")]/span/img[@data-src!=""]/@data-src|./div[has-class("searchImg")]/div/span/img[@src!=""]/@src',
default=None)
obj_short_description = CleanText('./div[has-class("searchIngredients")]')
class RecipePage(HTMLPage):
""" Page which contains a recipe
"""
@method
class get_recipe(ItemElement):
klass = Recipe
obj_id = Env('_id')
obj_title = CleanText('//div[@id="ficheRecette"]/h1')
obj_picture_url = CleanText('//img[@id="shareimg" and @src!=""]/@src', default=None)
obj_thumbnail_url = CleanText('//img[@id="shareimg" and @src!=""]/@src', default=None)
def obj_preparation_time(self):
_prep = CuisineazDuration(CleanText('//span[@id="ctl00_ContentPlaceHolder_LblRecetteTempsPrepa"]'))(self)
return int(_prep.total_seconds() / 60)
def obj_cooking_time(self):
_cook = CuisineazDuration(CleanText('//span[@id="ctl00_ContentPlaceHolder_LblRecetteTempsCuisson"]'))(self)
return int(_cook.total_seconds() / 60)
def obj_nb_person(self):
nb_pers = CleanText('//span[@id="ctl00_ContentPlaceHolder_LblRecetteNombre"]')(self)
return [nb_pers] if nb_pers else NotAvailable
def obj_ingredients(self):
ingredients = []
for el in XPath('//div[@id="ingredients"]/ul/li')(self):
ingredients.append(CleanText('.')(el))
return ingredients
obj_instructions = CleanHTML('//div[@id="preparation"]/span[@class="instructions"]')
@method
class get_comments(ListElement):
item_xpath = '//div[@class="comment pb15 row"]'
class item(ItemElement):
klass = Comment
obj_author = CleanText('./div[has-class("comment-left")]/div/div/div[@class="fs18 txtcaz mb5 first-letter"]')
obj_text = CleanText('./div[has-class("comment-right")]/div/p')
obj_id = CleanText('./@id')
def obj_rate(self):
return len(XPath('./div[has-class("comment-right")]/div/div/div/span/span[@class="icon icon-star"]')(self))
| agpl-3.0 | 3,232,280,561,110,571,500 | 36.720339 | 166 | 0.611773 | false |
jhseu/tensorflow | tensorflow/python/debug/lib/source_utils.py | 1 | 13967 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions that help to inspect Python source w.r.t. TF graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import re
import zipfile
import absl
import numpy as np
from tensorflow.python.debug.lib import profiling
_TENSORFLOW_BASEDIR = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.normpath(os.path.abspath(__file__))))))
_ABSL_BASEDIR = os.path.dirname(absl.__file__)
UNCOMPILED_SOURCE_SUFFIXES = (".py",)
COMPILED_SOURCE_SUFFIXES = (".pyc", ".pyo")
def _norm_abs_path(file_path):
return os.path.normpath(os.path.abspath(file_path))
def is_extension_uncompiled_python_source(file_path):
_, extension = os.path.splitext(file_path)
return extension.lower() in UNCOMPILED_SOURCE_SUFFIXES
def is_extension_compiled_python_source(file_path):
_, extension = os.path.splitext(file_path)
return extension.lower() in COMPILED_SOURCE_SUFFIXES
def _convert_watch_key_to_tensor_name(watch_key):
return watch_key[:watch_key.rfind(":")]
def guess_is_tensorflow_py_library(py_file_path):
"""Guess whether a Python source file is a part of the tensorflow library.
Special cases:
1) Returns False for unit-test files in the library (*_test.py),
2) Returns False for files under python/debug/examples.
Args:
py_file_path: full path of the Python source file in question.
Returns:
(`bool`) Whether the file is a part of the tensorflow library.
Raises:
ValueError: if the extension name of py_file_path does not indicate a Python
      source file (compiled or uncompiled).
"""
if (not is_extension_uncompiled_python_source(py_file_path) and
not is_extension_compiled_python_source(py_file_path)):
raise ValueError(
"Input file path (%s) is not a Python source file." % py_file_path)
py_file_path = _norm_abs_path(py_file_path)
return ((py_file_path.startswith(_TENSORFLOW_BASEDIR) or
py_file_path.startswith(_ABSL_BASEDIR)) and
not py_file_path.endswith("_test.py") and
(os.path.normpath("tensorflow/python/debug/examples") not in
os.path.normpath(py_file_path)))
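# For example (the paths are illustrative): a file inside the TensorFlow
# source tree such as .../tensorflow/python/ops/math_ops.py is treated as
# part of the library, while .../tensorflow/python/ops/math_ops_test.py and
# files under python/debug/examples are not.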
def load_source(source_file_path):
"""Load the content of a Python source code file.
  This function covers the following cases:
1. source_file_path points to an existing Python (.py) file on the
file system.
2. source_file_path is a path within a .par file (i.e., a zip-compressed,
self-contained Python executable).
Args:
source_file_path: Path to the Python source file to read.
Returns:
A length-2 tuple:
- Lines of the source file, as a `list` of `str`s.
- The width of the string needed to show the line number in the file.
This is calculated based on the number of lines in the source file.
Raises:
IOError: if loading is unsuccessful.
"""
if os.path.isfile(source_file_path):
with open(source_file_path, "rb") as f:
source_text = f.read().decode("utf-8")
source_lines = source_text.split("\n")
else:
# One possible reason why the file doesn't exist is that it's a path
# inside a .par file. Try that possibility.
source_lines = _try_load_par_source(source_file_path)
if source_lines is None:
raise IOError(
"Source path neither exists nor can be loaded as a .par file: %s" %
source_file_path)
line_num_width = int(np.ceil(np.log10(len(source_lines)))) + 3
return source_lines, line_num_width
def _try_load_par_source(source_file_path):
"""Try loading the source code inside a .par file.
A .par file is a zip-compressed, self-contained Python executable.
It contains the content of individual Python source files that can
be read only through extracting from the zip file.
Args:
source_file_path: The full path to the file inside the .par file. This
path should include the path to the .par file itself, followed by the
intra-par path, e.g.,
"/tmp/my_executable.par/org-tensorflow/tensorflow/python/foo/bar.py".
Returns:
If successful, lines of the source file as a `list` of `str`s.
Else, `None`.
"""
prefix_path = source_file_path
while True:
prefix_path, basename = os.path.split(prefix_path)
if not basename:
break
suffix_path = os.path.normpath(
os.path.relpath(source_file_path, start=prefix_path))
if prefix_path.endswith(".par") and os.path.isfile(prefix_path):
with zipfile.ZipFile(prefix_path) as z:
norm_names = [os.path.normpath(name) for name in z.namelist()]
if suffix_path in norm_names:
with z.open(z.namelist()[norm_names.index(suffix_path)]) as zf:
source_text = zf.read().decode("utf-8")
return source_text.split("\n")
def annotate_source(dump,
source_file_path,
do_dumped_tensors=False,
file_stack_top=False,
min_line=None,
max_line=None):
"""Annotate a Python source file with a list of ops created at each line.
(The annotation doesn't change the source file itself.)
Args:
dump: (`DebugDumpDir`) A `DebugDumpDir` object of which the Python graph
has been loaded.
source_file_path: (`str`) Path to the source file being annotated.
    do_dumped_tensors: (`bool`) Whether dumped Tensors, instead of ops, are to be
used to annotate the source file.
file_stack_top: (`bool`) Whether only the top stack trace in the
specified source file is to be annotated.
min_line: (`None` or `int`) The 1-based line to start annotate the source
file from (inclusive).
max_line: (`None` or `int`) The 1-based line number to end the annotation
at (exclusive).
Returns:
A `dict` mapping 1-based line number to a list of op name(s) created at
that line, or tensor names if `do_dumped_tensors` is True.
Raises:
ValueError: If the dump object does not have a Python graph set.
"""
py_graph = dump.python_graph
if not py_graph:
raise ValueError("Cannot perform source annotation due to a lack of set "
"Python graph in the dump object")
source_file_path = _norm_abs_path(source_file_path)
line_to_op_names = {}
for op in py_graph.get_operations():
for file_path, line_number, _, _ in reversed(dump.node_traceback(op.name)):
if (min_line is not None and line_number < min_line or
max_line is not None and line_number >= max_line):
continue
if _norm_abs_path(file_path) != source_file_path:
continue
if do_dumped_tensors:
watch_keys = dump.debug_watch_keys(op.name)
# Convert watch keys to unique Tensor names.
items_to_append = list(
set(map(_convert_watch_key_to_tensor_name, watch_keys)))
else:
items_to_append = [op.name]
if line_number in line_to_op_names:
line_to_op_names[line_number].extend(items_to_append)
else:
line_to_op_names[line_number] = items_to_append
if file_stack_top:
break
return line_to_op_names
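# The returned mapping looks like the following sketch (line numbers and op
# names are hypothetical):
#   {12: ["dense/MatMul"], 13: ["dense/BiasAdd", "dense/Relu"]}
# or, with do_dumped_tensors=True, tensor names such as "dense/MatMul:0".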
def list_source_files_against_dump(dump,
path_regex_whitelist=None,
node_name_regex_whitelist=None):
"""Generate a list of source files with information regarding ops and tensors.
Args:
dump: (`DebugDumpDir`) A `DebugDumpDir` object of which the Python graph
has been loaded.
path_regex_whitelist: A regular-expression filter for source file path.
node_name_regex_whitelist: A regular-expression filter for node names.
Returns:
A list of tuples regarding the Python source files involved in constructing
the ops and tensors contained in `dump`. Each tuple is:
(source_file_path, is_tf_library, num_nodes, num_tensors, num_dumps,
first_line)
is_tf_library: (`bool`) A guess of whether the file belongs to the
TensorFlow Python library.
num_nodes: How many nodes were created by lines of this source file.
These include nodes with dumps and those without.
num_tensors: How many Tensors were created by lines of this source file.
These include Tensors with dumps and those without.
num_dumps: How many debug Tensor dumps were from nodes (and Tensors)
that were created by this source file.
first_line: The first line number (1-based) that created any nodes or
Tensors in this source file.
The list is sorted by ascending order of source_file_path.
Raises:
ValueError: If the dump object does not have a Python graph set.
"""
py_graph = dump.python_graph
if not py_graph:
raise ValueError("Cannot generate source list due to a lack of set "
"Python graph in the dump object")
path_to_node_names = collections.defaultdict(set)
path_to_tensor_names = collections.defaultdict(set)
path_to_first_line = {}
tensor_name_to_num_dumps = {}
path_regex = (re.compile(path_regex_whitelist)
if path_regex_whitelist else None)
node_name_regex = (re.compile(node_name_regex_whitelist)
if node_name_regex_whitelist else None)
to_skip_file_paths = set()
for op in py_graph.get_operations():
if node_name_regex and not node_name_regex.match(op.name):
continue
for file_path, line_number, _, _ in dump.node_traceback(op.name):
file_path = _norm_abs_path(file_path)
if (file_path in to_skip_file_paths or
path_regex and not path_regex.match(file_path) or
not os.path.isfile(file_path)):
to_skip_file_paths.add(file_path)
continue
path_to_node_names[file_path].add(op.name)
if file_path in path_to_first_line:
if path_to_first_line[file_path] > line_number:
path_to_first_line[file_path] = line_number
else:
path_to_first_line[file_path] = line_number
for output_tensor in op.outputs:
tensor_name = output_tensor.name
path_to_tensor_names[file_path].add(tensor_name)
watch_keys = dump.debug_watch_keys(op.name)
for watch_key in watch_keys:
node_name, output_slot, debug_op = watch_key.split(":")
tensor_name = "%s:%s" % (node_name, output_slot)
if tensor_name not in tensor_name_to_num_dumps:
tensor_name_to_num_dumps[tensor_name] = len(
dump.get_tensors(node_name, int(output_slot), debug_op))
path_to_num_dumps = {}
for path in path_to_tensor_names:
path_to_num_dumps[path] = sum(
tensor_name_to_num_dumps.get(tensor_name, 0)
for tensor_name in path_to_tensor_names[path])
output = []
for file_path in path_to_node_names:
output.append((
file_path,
guess_is_tensorflow_py_library(file_path),
len(path_to_node_names.get(file_path, {})),
len(path_to_tensor_names.get(file_path, {})),
path_to_num_dumps.get(file_path, 0),
path_to_first_line[file_path]))
return sorted(output, key=lambda x: x[0])
def annotate_source_against_profile(profile_data,
source_file_path,
node_name_filter=None,
op_type_filter=None,
min_line=None,
max_line=None):
"""Annotate a Python source file with profiling information at each line.
(The annotation doesn't change the source file itself.)
Args:
profile_data: (`list` of `ProfileDatum`) A list of `ProfileDatum`.
source_file_path: (`str`) Path to the source file being annotated.
node_name_filter: Regular expression to filter by node name.
op_type_filter: Regular expression to filter by op type.
min_line: (`None` or `int`) The 1-based line to start annotate the source
file from (inclusive).
max_line: (`None` or `int`) The 1-based line number to end the annotation
at (exclusive).
Returns:
A `dict` mapping 1-based line number to a the namedtuple
`profiling.LineOrFuncProfileSummary`.
"""
source_file_path = _norm_abs_path(source_file_path)
node_name_regex = re.compile(node_name_filter) if node_name_filter else None
op_type_regex = re.compile(op_type_filter) if op_type_filter else None
line_to_profile_summary = {}
for profile_datum in profile_data:
if not profile_datum.file_path:
continue
if _norm_abs_path(profile_datum.file_path) != source_file_path:
continue
if (min_line is not None and profile_datum.line_number < min_line or
max_line is not None and profile_datum.line_number >= max_line):
continue
if (node_name_regex and
not node_name_regex.match(profile_datum.node_exec_stats.node_name)):
continue
if op_type_regex and not op_type_regex.match(profile_datum.op_type):
continue
if profile_datum.line_number not in line_to_profile_summary:
line_to_profile_summary[profile_datum.line_number] = (
profiling.AggregateProfile(profile_datum))
else:
line_to_profile_summary[profile_datum.line_number].add(profile_datum)
return line_to_profile_summary
| apache-2.0 | 5,331,100,872,048,130,000 | 35.467363 | 80 | 0.658695 | false |
markreidvfx/pyaaf_old | docs/parse_aaf_header.py | 1 | 2348 |
import pickle
def parse_aaf_header(header_path, dest_path=None):
if not dest_path:
dest_path = 'docs.pkl'
f = open(header_path, 'r')
header = f.read()
f.close()
comments = ""
interface = {}
current = None
for line in header.splitlines():
if line.count("//"):
if line.count("// Interface"):
current = line.replace("// Interface",'').strip()
current = current.replace('IEnumAAF','AxIter.').replace("IAAF",'Ax')
if current:
if not interface.has_key(current):
interface[current] = ""
interface[current] += line
interface[current] += '\n'
doc_dict = {}
for item, value in sorted(interface.items()):
for i in value.split("//***********************************************************"):
lines = i.splitlines()
method = None
try:
line2 = lines[2]
if line2.count("("):
method = line2.replace("//",'').replace("(",'').replace(")","").strip()
except:
pass
if method:
if not doc_dict.has_key(item):
doc_dict[item] = {}
doc = ""
for l in lines[3:]:
doc_line = """ ///"""
if l.count(doc_line):
doc += l.replace(doc_line,'') + '\n'
doc_dict[item][method] = doc
#"\n".join(lines[3:])
for key,value in sorted(doc_dict.items()):
print key
for method,docs in value.items():
print key,'::',method
print docs
pickle.dump(doc_dict,open(dest_path, 'w'),pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser()
(options, args) = parser.parse_args()
header_path = args[0]
dest_path = None
if len(args) > 1:
dest_path = args[1]
parse_aaf_header(header_path,dest_path)
| mit | -8,351,774,476,693,415,000 | 25.681818 | 94 | 0.405877 | false |
facebook/mcrouter | mcrouter/test/test_latency_injection_route.py | 1 | 1957 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from datetime import datetime
from mcrouter.test.MCProcess import Memcached
from mcrouter.test.McrouterTestCase import McrouterTestCase
class TestLatencyInjectionRoute(McrouterTestCase):
config_latency_before = './mcrouter/test/test_latency_injection_before.json'
    config_latency_after = './mcrouter/test/test_latency_injection_after.json'
    config_latency_total = './mcrouter/test/test_latency_injection_total.json'
def setUp(self) -> None:
self.mc = self.add_server(Memcached())
self.mcrouter_latency_before =\
self.add_mcrouter(self.config_latency_before)
self.mcrouter_latency_after =\
self.add_mcrouter(self.config_latency_after)
self.mcrouter_latency_total =\
self.add_mcrouter(self.config_latency_total)
def test_latency_before(self) -> None:
self.mc.set("key1", "value1")
t_start = datetime.now()
self.assertEqual("value1", self.mcrouter_latency_before.get("key1"))
t_end = datetime.now()
duration = t_end - t_start
self.assertGreaterEqual(duration.total_seconds(), 2)
def test_latency_after(self) -> None:
self.mc.set("key2", "value2")
t_start = datetime.now()
self.assertTrue("value2", self.mcrouter_latency_after.get("key2"))
t_end = datetime.now()
duration = t_end - t_start
self.assertGreaterEqual(duration.total_seconds(), 1)
def test_latency_total(self) -> None:
self.mc.set("key3", "value3")
t_start = datetime.now()
self.assertTrue("value3", self.mcrouter_latency_total.get("key3"))
t_end = datetime.now()
duration = t_end - t_start
self.assertGreaterEqual(duration.total_seconds(), 1)
| mit | 7,733,254,879,666,440,000 | 33.946429 | 80 | 0.663771 | false |
gooddata/openstack-nova | nova/conf/vnc.py | 1 | 9039 | # Copyright (c) 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_config import types
vnc_group = cfg.OptGroup(
'vnc',
title='VNC options',
help="""
Virtual Network Computer (VNC) can be used to provide remote desktop
console access to instances for tenants and/or administrators.""")
ALL_OPTS = [
cfg.BoolOpt(
'enabled',
default=True,
deprecated_group='DEFAULT',
deprecated_name='vnc_enabled',
help="""
Enable VNC related features.
Guests will get created with graphical devices to support this. Clients
(for example Horizon) can then establish a VNC connection to the guest.
"""),
cfg.StrOpt(
'keymap',
deprecated_group='DEFAULT',
deprecated_name='vnc_keymap',
deprecated_for_removal=True,
deprecated_since='18.0.0',
deprecated_reason="""
Configuring this option forces QEMU to do keymap conversions. These conversions
are lossy and can result in significant issues for users of non en-US
keyboards. You should instead use a VNC client that supports Extended Key Event
messages, such as noVNC 1.0.0. Refer to bug #1682020 for more information.""",
help="""
Keymap for VNC.
The keyboard mapping (keymap) determines which keyboard layout a VNC
session should use by default.
Possible values:
* A keyboard layout which is supported by the underlying hypervisor on
this node. This is usually an 'IETF language tag' (for example
'en-us'). If you use QEMU as hypervisor, you should find the list
of supported keyboard layouts at ``/usr/share/qemu/keymaps``.
"""),
cfg.HostAddressOpt(
'server_listen',
default='127.0.0.1',
deprecated_opts=[
cfg.DeprecatedOpt('vncserver_listen', group='DEFAULT'),
cfg.DeprecatedOpt('vncserver_listen', group='vnc'),
],
help="""
The IP address or hostname on which an instance should listen for
incoming VNC connection requests on this node.
"""),
cfg.HostAddressOpt(
'server_proxyclient_address',
default='127.0.0.1',
deprecated_opts=[
cfg.DeprecatedOpt('vncserver_proxyclient_address',
group='DEFAULT'),
cfg.DeprecatedOpt('vncserver_proxyclient_address', group='vnc'),
],
help="""
Private, internal IP address or hostname of VNC console proxy.
The VNC proxy is an OpenStack component that enables compute service
users to access their instances through VNC clients.
This option sets the private address to which proxy clients, such as
``nova-xvpvncproxy``, should connect.
"""),
cfg.URIOpt(
'novncproxy_base_url',
default='http://127.0.0.1:6080/vnc_auto.html',
deprecated_group='DEFAULT',
help="""
Public address of noVNC VNC console proxy.
The VNC proxy is an OpenStack component that enables compute service
users to access their instances through VNC clients. noVNC provides
VNC support through a websocket-based client.
This option sets the public base URL to which client systems will
connect. noVNC clients can use this address to connect to the noVNC
instance and, by extension, the VNC sessions.
If using noVNC >= 1.0.0, you should use ``vnc_lite.html`` instead of
``vnc_auto.html``.
Related options:
* novncproxy_host
* novncproxy_port
"""),
cfg.HostAddressOpt(
'xvpvncproxy_host',
default='0.0.0.0',
deprecated_group='DEFAULT',
help="""
IP address or hostname that the XVP VNC console proxy should bind to.
The VNC proxy is an OpenStack component that enables compute service
users to access their instances through VNC clients. Xen provides
the Xenserver VNC Proxy, or XVP, as an alternative to the
websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
XVP clients are Java-based.
This option sets the private address to which the XVP VNC console proxy
service should bind.
Related options:
* xvpvncproxy_port
* xvpvncproxy_base_url
"""),
cfg.PortOpt(
'xvpvncproxy_port',
default=6081,
deprecated_group='DEFAULT',
help="""
Port that the XVP VNC console proxy should bind to.
The VNC proxy is an OpenStack component that enables compute service
users to access their instances through VNC clients. Xen provides
the Xenserver VNC Proxy, or XVP, as an alternative to the
websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
XVP clients are Java-based.
This option sets the private port to which the XVP VNC console proxy
service should bind.
Related options:
* xvpvncproxy_host
* xvpvncproxy_base_url
"""),
cfg.URIOpt(
'xvpvncproxy_base_url',
default='http://127.0.0.1:6081/console',
deprecated_group='DEFAULT',
help="""
Public URL address of XVP VNC console proxy.
The VNC proxy is an OpenStack component that enables compute service
users to access their instances through VNC clients. Xen provides
the Xenserver VNC Proxy, or XVP, as an alternative to the
websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
XVP clients are Java-based.
This option sets the public base URL to which client systems will
connect. XVP clients can use this address to connect to the XVP
instance and, by extension, the VNC sessions.
Related options:
* xvpvncproxy_host
* xvpvncproxy_port
"""),
]
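# Illustrative example (assumed deployment values, not part of the upstream
# option definitions): on a compute node the options above are consumed from
# the ``[vnc]`` section of nova.conf, for instance::
#
#   [vnc]
#   enabled = True
#   server_listen = 0.0.0.0
#   server_proxyclient_address = 192.0.2.10
#   novncproxy_base_url = http://controller.example.com:6080/vnc_auto.html
#
# The addresses and URL are placeholders; real deployments use the compute
# node's management address and the publicly reachable proxy endpoint.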
CLI_OPTS = [
cfg.StrOpt(
'novncproxy_host',
default='0.0.0.0',
deprecated_group='DEFAULT',
help="""
IP address that the noVNC console proxy should bind to.
The VNC proxy is an OpenStack component that enables compute service
users to access their instances through VNC clients. noVNC provides
VNC support through a websocket-based client.
This option sets the private address to which the noVNC console proxy
service should bind.
Related options:
* novncproxy_port
* novncproxy_base_url
"""),
cfg.PortOpt(
'novncproxy_port',
default=6080,
deprecated_group='DEFAULT',
help="""
Port that the noVNC console proxy should bind to.
The VNC proxy is an OpenStack component that enables compute service
users to access their instances through VNC clients. noVNC provides
VNC support through a websocket-based client.
This option sets the private port to which the noVNC console proxy
service should bind.
Related options:
* novncproxy_host
* novncproxy_base_url
"""),
cfg.ListOpt(
'auth_schemes',
item_type=types.String(choices=(
('none', 'Allow connection without authentication'),
('vencrypt', 'Use VeNCrypt authentication scheme'),
)),
default=['none'],
help="""
The authentication schemes to use with the compute node.
Control what RFB authentication schemes are permitted for connections between
the proxy and the compute host. If multiple schemes are enabled, the first
matching scheme will be used, thus the strongest schemes should be listed
first.
Related options:
* ``[vnc]vencrypt_client_key``, ``[vnc]vencrypt_client_cert``: must also be set
"""),
cfg.StrOpt(
'vencrypt_client_key',
help="""The path to the client certificate PEM file (for x509)
The fully qualified path to a PEM file containing the private key which the VNC
proxy server presents to the compute node during VNC authentication.
Related options:
* ``vnc.auth_schemes``: must include ``vencrypt``
* ``vnc.vencrypt_client_cert``: must also be set
"""),
cfg.StrOpt(
'vencrypt_client_cert',
help="""The path to the client key file (for x509)
The fully qualified path to a PEM file containing the x509 certificate which
the VNC proxy server presents to the compute node during VNC authentication.
Related options:
* ``vnc.auth_schemes``: must include ``vencrypt``
* ``vnc.vencrypt_client_key``: must also be set
"""),
cfg.StrOpt(
'vencrypt_ca_certs',
help="""The path to the CA certificate PEM file
The fully qualified path to a PEM file containing one or more x509 certificates
for the certificate authorities used by the compute node VNC server.
Related options:
* ``vnc.auth_schemes``: must include ``vencrypt``
"""),
]
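# Illustrative example (assumed file locations, not part of the upstream
# option definitions): enabling VeNCrypt between the noVNC proxy and the
# compute nodes uses the VeNCrypt-related options above together, e.g. in the
# proxy's nova.conf::
#
#   [vnc]
#   auth_schemes = vencrypt,none
#   vencrypt_client_key = /etc/pki/nova-novncproxy/client-key.pem
#   vencrypt_client_cert = /etc/pki/nova-novncproxy/client-cert.pem
#   vencrypt_ca_certs = /etc/pki/nova-novncproxy/ca-cert.pem
#
# The PEM paths are placeholders; any files readable by the proxy service
# work, as long as the key, certificate, and CA bundle match.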
ALL_OPTS.extend(CLI_OPTS)
def register_opts(conf):
conf.register_group(vnc_group)
conf.register_opts(ALL_OPTS, group=vnc_group)
def register_cli_opts(conf):
conf.register_cli_opts(CLI_OPTS, group=vnc_group)
def list_opts():
return {vnc_group: ALL_OPTS}
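# Minimal usage sketch (assumed consumer code, not part of this module): a
# service registers the group at startup and then reads the values through
# the global oslo.config object, e.g.:
#
#   from oslo_config import cfg
#   from nova.conf import vnc
#
#   CONF = cfg.CONF
#   vnc.register_opts(CONF)
#   if CONF.vnc.enabled:
#       base_url = CONF.vnc.novncproxy_base_url
#
# In nova itself this registration is normally performed centrally by the
# ``nova.conf`` package rather than by importing this module directly.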
| apache-2.0 | -2,166,768,096,561,270,300 | 29.640678 | 79 | 0.708043 | false |