# ==== repo: griffinfoster/pulsar-polarization-sims | path: scripts/fits2dat.py ====
#!/usr/bin/env python
"""
Convert a PSR FITS file to a headerless .dat text file of Stokes values
"""
import pyfits as pf
import numpy as n
import os
import sys
if __name__ == "__main__":
from optparse import OptionParser
o = OptionParser()
o.set_usage('%prog [options] [FITS file]')
o.set_description(__doc__)
o.add_option('-o', '--output', dest='output', default='template.smooth.dat',
help='Output file name, default:template.smooth.dat')
opts, args = o.parse_args(sys.argv[1:])
# see http://www.atnf.csiro.au/research/pulsar/psrfits/fitsdef.html, section: Subintegration data
hdulist=pf.open(args[0])
d=hdulist[3].data
offsets=d[0][-3]
sclFactor=d[0][-2]
data=d[0][-1]
if len(data.shape)==1:
data.shape=(4,1,data.shape[-1]/4)
dout=n.zeros_like(data, dtype=n.float32)
for sid, scl in enumerate(sclFactor): dout[sid,0,:] = data[sid,0,:].astype(n.float32)*scl + offsets[sid]
#n.savetxt(opts.output, dout, fmt='%10.10f')
fh=open(opts.output,'w')
outputstr=''
for i in range(dout.shape[2]):
outputstr+='%10.10f %10.10f %10.10f %10.10f\n'%(dout[0,0,i],dout[1,0,i],dout[2,0,i],dout[3,0,i])
fh.write(outputstr)
fh.close()
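# Example invocation (hypothetical file names; assumes a PSRFITS archive whose
# HDU 3 holds the subintegration table, as this script expects):
#   python fits2dat.py -o template.smooth.dat observation.fits
# The output is a plain text file with one "I Q U V" row per phase bin.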
# ==== end of scripts/fits2dat.py | license: mit ====
# ==== repo: aplanas/hackweek11 | path: models.py ====
#! /usr/bin/env python
import argparse
import csv
from pprint import pprint
import re
from time import time
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.cross_validation import train_test_split
from sklearn.datasets.base import Bunch
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.grid_search import GridSearchCV
# from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.preprocessing import Binarizer, MinMaxScaler
from sklearn.svm import LinearSVC
def fetch_gitlog(data_path, collapse=None, full_data=False, stats=False):
"""Convert the CSV log into a datase suitable for scikit-learn."""
description = 'Labeled git log history'
with open(data_path, 'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',', quotechar='"',
doublequote=True)
next(csvreader)
# Summary, Message, Number of Files, Total Lines, Added Lines, Deleted Lines, Label, SHA-1
full_dataset = [(line[0].strip(), line[1].strip(),
int(line[2]), int(line[3]), int(line[4]),
int(line[5]), line[6], line[7]) for line in csvreader]
if not full_data:
data = np.array([d for d in full_dataset if d[6]])
else:
data = np.array(full_dataset)
collapse = {} if not collapse else collapse
for key, value in collapse.items():
data[data[:, 6] == key, 6] = value
# Encode targets into numbers
target = [d[6] for d in data]
le = LabelEncoder()
le.fit(target)
target_names = le.classes_
target = le.transform(target)
if stats:
print 'Original dataset [%d]' % len(full_dataset)
print 'Labeled dataset [%d]' % len(data)
print 'Ratio [%2.2f%%]' % (100.0 * len(data) / len(full_dataset))
return Bunch(filename=data_path,
data=data,
target_names=target_names,
target=target,
DESCR=description)
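# A minimal usage sketch (hypothetical CSV path; assumes the column layout
# documented above: Summary, Message, file/line counts, Label, SHA-1):
#
#   dataset = fetch_gitlog('gitlog_labeled.csv',
#                          collapse={'Fix (Minor)': 'Fix', 'Fix (Major)': 'Fix'},
#                          stats=True)
#   print dataset.target_names      # e.g. ['Feature', 'Fix', ...]
#   print dataset.data.shape, dataset.target.shape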
class SliceFeature(BaseEstimator):
"""Estimator to slice a feature."""
def __init__(self, slc, astype=None, flatten=False):
"""Build an instance using a slice object.
>>> X = np.array([[1, 2, 3], [10, 20, 30]])
>>> X
array([[ 1, 2, 3],
[10, 20, 30]])
>>> slc = SliceFeature(slice(0, 1))
>>> slc.transform(X)
array([ 1, 10])
>>> slc = SliceFeature(slice(0, 2))
>>> slc.transform(X)
array([[ 1, 2],
[10, 20]])
"""
self.slc = slc
self.astype = astype
self.flatten = flatten
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
return self
def transform(self, X, y=None):
if self.slc.step:
index = range(self.slc.start, self.slc.stop, self.slc.step)
else:
index = range(self.slc.start, self.slc.stop)
result = X[:, index]
if self.astype:
result = result.astype(self.astype)
if self.flatten:
result = result.reshape(X.shape[0])
return result
class RegexSpotter(BaseEstimator):
def __init__(self, regexp):
# Store the actual argument so that BaseEstimator.get_params() will do its magic.
self.regexp = regexp
self.pattern = re.compile(regexp)
def fit(self, X, y=None):
# Stateless transformer: nothing to fit. Returning self keeps the
# scikit-learn estimator contract (fit must return the estimator).
return self
def fit_transform(self, X, y=None):
return self.transform(X)
def transform(self, X, y=None):
matches = np.fromiter((self.pattern.search(x) for x in X), dtype=bool)
return matches[:, np.newaxis]
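# Illustration only (made-up commit messages): RegexSpotter turns a column of
# raw strings into a single boolean feature column.
#
#   spotter = RegexSpotter(r'[0-9a-eA-E]{6,}')
#   spotter.transform(np.array(['revert bad1dea42', 'typo fix']))
#   # -> array([[ True], [False]])   shape: (n_samples, 1)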
def regex_pipeline(column, regex):
pipeline = Pipeline([
('slice', SliceFeature(slice(column, column + 1), flatten=True)),
('sha_spotter', RegexSpotter(regex))
])
return pipeline
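# Example wiring (mirrors the 'contains_sha' feature defined in __main__ below):
#   sha_feature = regex_pipeline(1, r'[0-9a-eA-E]{6,}')
#   # sha_feature.fit_transform(data) yields one boolean column per commit,
#   # taken from the message column (index 1).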
class Densifier(BaseEstimator):
def fit(self, X, y=None):
# Stateless: nothing to fit; return self per the scikit-learn estimator contract.
return self
def fit_transform(self, X, y=None):
return self.transform(X)
def transform(self, X, y=None):
return X.toarray()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train different models with the same dataset.')
parser.add_argument('-c', '--csv', help='csv file name')
parser.add_argument('-l', '--label', action='store_true', help='label missing data')
parser.add_argument('-d', '--debug', help='turn on debugging: only one job', action='store_true')
args = parser.parse_args()
if not args.csv:
parser.print_help()
exit(1)
if args.debug:
n_jobs = 1
else:
n_jobs = -1
pipeline_summary = Pipeline([
('slice', SliceFeature(slice(0, 1), flatten=True)),
('vect', CountVectorizer(stop_words='english')),
# ('binary', Binarizer()),
('tfidf', TfidfTransformer()),
# ('scaler', StandardScaler(with_mean=False)),
])
pipeline_message = Pipeline([
('slice', SliceFeature(slice(1, 2), flatten=True)),
('vect', CountVectorizer(stop_words='english')),
# ('binary', Binarizer()),
('tfidf', TfidfTransformer()),
# ('scaler', StandardScaler(with_mean=False)),
])
pipeline_numeric = Pipeline([
('slice', SliceFeature(slice(2, 6), astype=int)),
])
main_pipeline = Pipeline([
('features', FeatureUnion([
('summary', pipeline_summary),
('message', pipeline_message),
('numeric', pipeline_numeric),
('contains_sha', regex_pipeline(1, r'[0-9a-eA-E]{6,}')),
# ('contains_http', regex_pipeline(1, r'https?://')),
# ('contains_bugzilla', regex_pipeline(1, r'bugzilla\.kernel\.org')),
# ('contains_lkml', regex_pipeline(1, r'lkml\.kernel\.org')),
])),
# ('densifier', Densifier()),
# ('scaler', StandardScaler(with_mean=False)),
# ('scaler', StandardScaler()),
('clf', LinearSVC()),
# ('clf', LogisticRegression()),
])
parameters = {
'features__summary__vect__max_df': (0.25, 0.5),
# 'features__summary__vect__max_df': (0.5, 0.75, 1.0),
'features__summary__vect__max_features': (None, 10, 100, 1000),
# 'features__summary__vect__max_features': (None, 5000, 10000, 50000),
# 'features__summary__vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
'features__summary__tfidf__use_idf': (True, False),
# 'features__summary__tfidf__norm': ('l1', 'l2'),
'features__message__vect__max_df': (0.5, 0.75, 1.0),
'features__message__vect__max_features': (None, 100, 1000, 5000),
# 'features__message__vect__max_features': (None, 5000, 10000, 50000),
# 'features__message__vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
'features__message__tfidf__use_idf': (True, False),
# 'features__message__tfidf__norm': ('l1', 'l2'),
# 'clf__C': (0.0001, 0.001, 0.01, 0.1, 1.0),
# 'clf__loss': ('l1', 'l2'),
# 'clf__penalty': ('l1', 'l2'),
# 'clf__dual': (True, False),
# 'clf__tol': (1e-4),
# 'clf__multi_class': ('ovr', 'crammer_singer'),
# 'clf__fit_intercept': (True, False),
# 'clf__intercept_scaling': (0.0001, 0.001, 0.01, 0.1, 1.0),
}
grid_search = GridSearchCV(main_pipeline, parameters, n_jobs=n_jobs, verbose=1)
print 'Performing grid search...'
print 'pipeline:', [name for name, _ in main_pipeline.steps]
print 'parameters:'
pprint(parameters)
collapse_map = {
'Fix (Minor)': 'Fix',
'Fix (Major)': 'Fix',
'Regression (Minor)': 'Regression',
'Regression (Major)': 'Regression',
# 'Regression (Minor)': 'Fix',
# 'Regression (Major)': 'Fix',
# 'Refactoring (Minor)': 'Refactoring',
# 'Refactoring (Major)': 'Refactoring',
'Refactoring (Minor)': 'Feature',
'Refactoring (Major)': 'Feature',
'Feature (Minor)': 'Feature',
'Feature (Major)': 'Feature',
'Documentation (Minor)': 'Documentation',
'Documentation (Major)': 'Documentation',
# 'Documentation (Minor)': 'Feature',
# 'Documentation (Major)': 'Feature',
}
data = fetch_gitlog(args.csv, collapse=collapse_map, stats=True)
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(data.data, data.target, random_state=0)
t0 = time()
grid_search.fit(X_train, y_train)
print 'done in %0.3fs' % (time() - t0)
print
print 'Best score: %0.3f' % grid_search.best_score_
print 'Best parameters set:'
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print '\t%s: %r' % (param_name, best_parameters[param_name])
print
# Print the confusion matrix
estimator = grid_search.best_estimator_
y_pred = estimator.predict(X_test)
print 'Confusion matrix for', data.target_names
print confusion_matrix(y_test, y_pred)
if args.label:
# Get the full data and add labels
full_data = fetch_gitlog(args.csv, full_data=True)
unknown = [i for i, l in enumerate(full_data.target_names) if not l][0]
y_pred = estimator.predict(full_data.data)
for x, y, y_p in zip(full_data.data, full_data.target, y_pred):
if not y:
print data.target_names[y_p]
else:
print full_data.target_names[y]
# ==== end of models.py | license: mit ====
# ==== repo: hofmannedv/training-python | path: data-structures/double-list-deque.py ====
# -----------------------------------------------------------
# demonstrates how to create and use a doubly-linked list
# using the collections module
#
# (C) 2017 Frank Hofmann, Berlin, Germany
# Released under GNU Public License (GPL)
# email [email protected]
# -----------------------------------------------------------
from collections import deque
class ListNode:
def __init__(self, data):
"constructor class to initiate this object"
# store data
self.data = data
return
# main program
# create three single nodes
node1 = ListNode(15)
node2 = ListNode(8.2)
node3 = ListNode("Berlin")
# create a track (double-ended queue)
track = deque([node1,node2,node3])
print("three items (initial list):")
for item in track:
print (item.data)
print ("--")
# add an item at the beginning
node4 = ListNode(15)
track.appendleft(node4)
print("four items (added as the head):")
for item in track:
print (item.data)
print ("--")
# add an item at the end
node5 = ListNode("Moscow")
print("five items (added at the end):")
track.append(node5)
for item in track:
print (item.data)
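# Possible continuation (not part of the original demo): a deque also removes
# items from either end in O(1) time.
#   first = track.popleft()   # node4, the head added above
#   last = track.pop()        # node5 ("Moscow")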
# ==== end of data-structures/double-list-deque.py | license: gpl-2.0 ====
# ==== repo: bgshin/doc-classify-multi-gpu | path: src/cifar10/cifar10_multi_gpu_train.py ====
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using multiple GPU's with synchronous updates.
Accuracy:
cifar10_multi_gpu_train.py achieves ~86% accuracy after 100K steps (256
epochs of data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
--------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
2 Tesla K20m | 0.13-0.20 | ~84% at 30K steps (2.5 hours)
3 Tesla K20m | 0.13-0.18 | ~84% at 30K steps
4 Tesla K20m | ~0.10 | ~84% at 30K steps
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import re
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/home/bgshin/works/doc-classify-multi-gpu/src/cifar10/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_integer('num_gpus', 2,
"""How many GPUs to use.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def tower_loss(scope):
"""Calculate the total loss on a single tower running the CIFAR model.
Args:
scope: unique prefix string identifying the CIFAR tower, e.g. 'tower_0'
Returns:
Tensor of shape [] containing the total loss for a batch of data
"""
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build inference Graph.
logits = cifar10.inference(images)
# Build the portion of the Graph calculating the losses. Note that we will
# assemble the total_loss using a custom function below.
_ = cifar10.loss(logits, labels)
# Assemble all of the losses for the current tower only.
losses = tf.get_collection('losses', scope)
# Calculate the total loss for the current tower.
total_loss = tf.add_n(losses, name='total_loss')
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
loss_name = re.sub('%s_[0-9]*/' % cifar10.TOWER_NAME, '', l.op.name)
tf.summary.scalar(loss_name, l)
return total_loss
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
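# Sketch of what the averaging does (hypothetical 2-tower, 1-variable case):
#   tower_grads = [[(g0_gpu0, v0)], [(g0_gpu1, v0)]]
#   average_gradients(tower_grads)  ->  [((g0_gpu0 + g0_gpu1) / 2, v0)]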
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default(), tf.device('/cpu:0'):
# Create a variable to count the number of train() calls. This equals the
# number of batches processed * FLAGS.num_gpus.
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), trainable=False)
# Calculate the learning rate schedule.
num_batches_per_epoch = (cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN /
FLAGS.batch_size)
decay_steps = int(num_batches_per_epoch * cifar10.NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(cifar10.INITIAL_LEARNING_RATE,
global_step,
decay_steps,
cifar10.LEARNING_RATE_DECAY_FACTOR,
staircase=True)
# Create an optimizer that performs gradient descent.
opt = tf.train.GradientDescentOptimizer(lr)
# Calculate the gradients for each model tower.
tower_grads = []
with tf.variable_scope(tf.get_variable_scope()):
for i in xrange(FLAGS.num_gpus):
with tf.device('/gpu:%d' % i):
with tf.name_scope('%s_%d' % (cifar10.TOWER_NAME, i)) as scope:
# Calculate the loss for one tower of the CIFAR model. This function
# constructs the entire CIFAR model but shares the variables across
# all towers.
loss = tower_loss(scope)
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
# Retain the summaries from the final tower.
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
# Calculate the gradients for the batch of data on this CIFAR tower.
grads = opt.compute_gradients(loss)
# Keep track of the gradients across all towers.
tower_grads.append(grads)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = average_gradients(tower_grads)
# Add a summary to track the learning rate.
summaries.append(tf.summary.scalar('learning_rate', lr))
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))
# Apply the gradients to adjust the shared variables.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
summaries.append(tf.summary.histogram(var.op.name, var))
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
cifar10.MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
# Group all updates to into a single train op.
train_op = tf.group(apply_gradient_op, variables_averages_op)
# Create a saver.
saver = tf.train.Saver(tf.global_variables())
# Build the summary operation from the last tower summaries.
summary_op = tf.summary.merge(summaries)
# Build an initialization operation to run below.
init = tf.global_variables_initializer()
# Start running operations on the Graph. allow_soft_placement must be set to
# True to build towers on GPU, as some of the ops do not have GPU
# implementations.
gpu_options = tf.GPUOptions(visible_device_list=str('2,3'), allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(
gpu_options = gpu_options,
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement))
sess.run(init)
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
for step in xrange(FLAGS.max_steps):
start_time = time.time()
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 10 == 0:
num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
examples_per_sec = num_examples_per_step / duration
sec_per_batch = duration / FLAGS.num_gpus
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), step, loss_value,
examples_per_sec, sec_per_batch))
if step % 100 == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
# ==== end of src/cifar10/cifar10_multi_gpu_train.py | license: apache-2.0 ====
# ==== repo: frac/twitter-autoblock | path: pwytter.py ====
#!/usr/bin/python
#
# Author : Pierre-Jean Coudert
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
'''A Python Tkinter Twitter Client'''
import sys
from os.path import dirname, join, abspath
try:
sys.path.append(join(abspath(dirname(__file__)), 'twclient'))
except:
sys.path.append(join(abspath(dirname(sys.path[0])), 'twclient'))
__author__ = 'Pierre-Jean Coudert <[email protected]>'
__version__ = '0.8'
APP_NAME = "pwytter"
from Tkinter import *
import tkBalloon
import pwSplashScreen
import twclient
import pwParam
import pwTools
import pwTheme
import time
import thread
import webbrowser
import textwrap
import os
import os.path
from PIL import Image, ImageTk
import gettext
import locale
_imageFile = {}
def imagefromfile(name):
global _imageFile
if name not in _imageFile.keys() :
_imageFile[name] = Image.open(os.path.join("media",name))
_imageFile[name].thumbnail((16,16),Image.ANTIALIAS)
return _imageFile[name]
class ClickableImage(Label):
def __init__(self, parent, imageName, clickCommand, aColor, aName, aHint=None):
self._imageRef = ImageTk.PhotoImage(imagefromfile(imageName))
self._hint = None
Label.__init__(self, parent, image=self._imageRef, bg=aColor, name=aName)
if aHint:
self._hint = tkBalloon.Balloon(self,aHint)
if clickCommand:
self.bind('<1>', clickCommand)
self["cursor"] = 'hand2'
def config(self,**options):
if "text" in options.keys():
self._hint.settext(options["text"])
Label.config(self, options)
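# Typical construction, as used throughout MainPanel below (arguments are the
# image file name in ./media, a click callback, the background color, the
# widget name, and an optional hint):
#   refresh_btn = ClickableImage(parent, "arrow_refresh.png",
#                                self.manualRefresh, self._bg, "refr0", _("Refresh"))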
class MainPanel(Frame):
""" Main tk Frame """
def __init__(self, master=None):
Frame.__init__(self, master)
#self._imageFile={}
#self._imageRef=[]
self._needToRefreshMe = True
self._imagesLoaded = True
self._imagesFriendsLoaded = True
self._needToShowParameters = False
self._versionChecked = False
self._busy = pwTools.BusyManager(master)
self._params = pwParam.PwytterParams()
self.pos = 0
self.offset = 0
self.Theme = None
self._display={
'fontName':('Helvetica',8,'bold'),
'fontMsg':('Helvetica',8,'bold'),
'fontLink':('Helvetica',9,'underline'),
'widthMsg':58,
'widthTwit':69,
'widthDirectMsg':66,
'friendcolumn':6
}
# if os.name=='mac':
# self._display.update({
# 'fontName':('Helvetica',9,'bold'),
# 'fontMsg':('Helvetica',9,'bold'),
# 'fontLink':('Helvetica',9,'underline'),
# 'widthMsg':61,
# 'widthTwit':61,
# 'widthDirectMsg':58
# })
if os.name=='posix':
print "Linux Theme tuning"
self._display.update({
'fontName':"helvetica 8 ",
'fontMsg': "helvetica 9",
'fontLink':"helvetica 9 underline",
'widthMsg':75,
'widthTwit':62,
'widthDirectMsg':59
})
if sys.platform == "darwin":
print "Mac OSX Theme tuning"
self._display.update({
'fontName':"Helvetica 12 bold",
'fontMsg': "Helvetica",
'fontLink':"Helvetica 12 underline",
'widthMsg':61,
'widthTwit':62,
'widthDirectMsg':59
})
self._loadTheme(self._params['theme'])
self._languages={"Chinese Simplified": {"locale":"zh_CN", "flag":"cn.gif"},
"Chinese Traditional": {"locale":"zh_TW", "flag":"tw.gif"},
"English": {"locale":"en_US", "flag":"us.gif"},
"French": {"locale":"fr_FR", "flag":"fr.gif"},
"German": {"locale":"de_DE", "flag":"de.gif"},
"Italian": {"locale":"it_IT", "flag":"it.gif"},
"Japanese": {"locale":"ja_JP", "flag":"jp.gif"},
"Polish": {"locale":"pl_PL", "flag":"pl.gif"},
"Portuguese": {"locale":"pt_BR", "flag":"br.gif"},
"Romanian": {"locale":"ro_RO", "flag":"ro.gif"},
"Russian": {"locale":"ru_RU", "flag":"ru.gif"},
"Serbian": {"locale":"sr_RS", "flag":"rs.gif"},
"Spanish": {"locale":"es_ES", "flag":"es.gif"},
"Swedish": {"locale":"sv_SE", "flag":"se.gif"}
}
try:
self._params.readFromXML()
except:
self._needToShowParameters = True
self.threadlock = thread.allocate_lock()
self.tw=twclient.TwClient(__version__, self._params)
self._applyParameters()
self._defaultTwitText = _('Enter your message here...')
self.twitText = StringVar()
self.twitText.set(self._defaultTwitText)
self.directText = StringVar()
self.directText.set(_('Enter your direct message here...'))
self.userVar = StringVar()
self.passwordVar = StringVar()
self.refreshVar = IntVar()
self.linesVar = IntVar()
self.timeLineVar= StringVar()
self.timeLineVar.set(self.tw.timeLineName())
self._bg=self._display['bg#']
self['bg']=self._bg
self.pack(ipadx=2, ipady=2,fill=BOTH, expand=YES)
self._create_widgets()
self._refresh_mySelfBox()
self._refresh_version()
if self._needToShowParameters:
self._showParameters()
self._refreshTime = 0
def _setLanguage(self, aLanguage='English'):
#Get the local directory since we are not installing anything
locale_path = os.path.join(os.path.realpath(os.path.dirname(sys.argv[0])),"locale")
langs = []
lc, encoding = locale.getdefaultlocale()
if (lc): langs = [lc]
# Now lets get all of the supported languages on the system
language = os.environ.get('LANGUAGE', None)
if (language): langs += language.split(":")
langs = langs + [alang['locale'] for alang in self._languages.values()]
gettext.bindtextdomain(APP_NAME, locale_path)
gettext.textdomain(APP_NAME)
if aLanguage in self._languages.keys():
self._currentLanguage=aLanguage
else:
self._currentLanguage='English'
try:
langFr = gettext.translation('pwytter',locale_path,languages=[self._languages[self._currentLanguage]['locale']])
langFr.install()
except Exception,e:
print str(e)
# Get the language to use
# self.lang = gettext.translation(APP_NAME, self.locale_path
# , languages=langs, fallback = True)
def _loadTheme(self, aName):
if self.Theme:
self.Theme.setTheme(aName)
else:
self.Theme=pwTheme.pwTheme(aName)
self._display.update(self.Theme.values)
def _applyParameters(self):
print "start apply params running"
self._refreshRate = int(self._params['refresh_rate'])
self._TwitLines = int(self._params['nb_lines'])
self.tw.login(self._params['user'], self._params['password'])
self._loadTheme(self._params['theme'])
self._setLanguage(self._params['language'])
self._bg=self._display['bg#']
self['bg']=self._bg
def _create_mySelfBox(self, aParent):
self.MySelfBox = Frame(aParent)
self.MyImageRef = ImageTk.PhotoImage("RGB",(48,48))
self.MyImage = Label(self.MySelfBox,image=self.MyImageRef )
self.MyImageHint = tkBalloon.Balloon(self.MyImage)
self.MyName = Label(self.MySelfBox,text="...",font=('Helvetica', 14, 'bold'))
self.MyNameHint = tkBalloon.Balloon(self.MyName)
self.ParametersShow = ClickableImage(self.MySelfBox, "cog.png",
self._showParameters,self._display['me_bg#'], "para0", _("Parameters..."))
self.MyUrl = Label(self.MySelfBox,text="http", cursor = 'hand2' )
self._theme_mySelfBox()
self.MyImage.grid(row=0,column=0, rowspan=3, sticky=W,padx=5, pady=5)
self.MyName.grid(row=0,column=1)
self.ParametersShow.grid(row=0,column=2, sticky="E")
self.MyUrl.grid(row=1,column=1, columnspan=2)
self.MyUrl.bind('<1>', self._userClick)
def _theme_mySelfBox(self):
self.ParametersShow.config(bg= self._display['me_bg#'], text = _("Parameters..."))
self.MySelfBox["bg"] = self._display['me_bg#']
self.MyName.config(bg= self._display['me_bg#'],fg=self._display['text#'])
self.MyUrl.config(bg= self._display['me_bg#'], fg= self._display['me_fg#'])
def _refresh_mySelfBox(self):
self._theme_mySelfBox()
try:
self._imagesFriendsLoaded = False
self._needToRefreshMe = not self.tw.getMyDetails()
self.MyImageRef.paste(self.tw.myimage, (0,0,48,18))
try:
self.MyName["text"] = self.tw.me.screen_name.encode('latin-1')
self.MyImageHint.settext(_("%s: %s %cLocation: %s") % (self.tw.me.name.encode('latin-1'),\
self.tw.me.description.encode('latin-1'),13,\
self.tw.me.location.encode('latin-1')))
self.MyNameHint.settext(self.MyImageHint.gettext())
except Exception, e:
self.MyName["text"] = _('Loading...')
self.MyImageHint.settext('')
self.MyNameHint.settext('')
try:
self.MyUrl["text"] = self.tw.me.url.encode('latin-1')
except Exception, e:
self.MyUrl["text"] = ''
except Exception, e:
print "_refreshMe Exception:",str(e)
self._needToRefreshMe = True
def _refresh_version(self):
self._versionChecked = self.tw.VersionChecked()
if self._versionChecked:
if not self.tw.VersionOK:
self._showUpdatePwytter()
def _create_RefreshBox(self, parent):
self.refreshBox = Frame(parent)
self.PwytterLink = ClickableImage(self.refreshBox, "home.png",
self._homeclick,self._bg, "pwyt0",_("Pwytter web site..."))
self.ShowFriends = ClickableImage(self.refreshBox, "side_expand.png",
self._showFriends,self._bg, "frie0",_("Show friends"))
self.HideFriends = ClickableImage(self.refreshBox, "side_contract.png",
self._hideFriends,self._bg, "frie1",_("Hide friends"))
self.Time = Label(self.refreshBox)
self.TimeLine = Label(self.refreshBox, cursor = 'hand2')
self.TimeLineHint=tkBalloon.Balloon(self.TimeLine, _("Switch TimeLine"))
self.timeLineVar.trace("w", self._timeLineMenuClick)
self.TimeLineMenu = Menu(rootTk, tearoff=0)
for tl in self.tw.timeLines:
self.TimeLineMenu.add_radiobutton(label=tl,
variable = self.timeLineVar)
self.Refresh = ClickableImage(self.refreshBox, "arrow_refresh.png",
self.manualRefresh,self._bg, "refr0", _("Refresh"))
self._theme_RefreshBox()
self.PwytterLink.grid(row=0,column=0, sticky="W")
self.ShowFriends.grid(row=0,column=1, sticky="E")
self.Time.grid(row=1,column=0,columnspan=2)
self.TimeLine.grid(row=2,column=0, sticky="W")
self.TimeLine.bind('<1>', self._timeLineClick)
self.Refresh.grid(row=2,column=1, sticky="E")
def _theme_RefreshBox(self):
self.refreshBox.config(bg=self._bg)
self.PwytterLink.config(bg=self._bg, text=_("Pwytter web site..."))
self.ShowFriends.config(bg=self._bg, text=_("Show friends"))
self.HideFriends.config(bg=self._bg, text=_("Hide friends"))
self.Time.config(text=_("Current Time Unknown..."), bg=self._bg, fg=self._display['text#'])
self.TimeLine.config(text=_("Timeline: %s") % (self.tw.timeLineName()),\
bg=self._display['timeline#'], fg=self._display['text#'])
self.TimeLineHint.settext(_("Switch TimeLine"))
self.Refresh.config(bg=self._bg, text=_("Refresh"))
def _create_updateBox(self, aParent):
update_bg=self._display['update#']
self.UpdateEmptyBox = Frame(aParent)
self.UpdateInsideBox = Frame(aParent, width=500)
self.UpdateCancel = ClickableImage(self.UpdateInsideBox, "cross.png",
self._hideUpdate, update_bg, "upca0", _("Cancel"))
self.UpdateLbl=Label(self.UpdateInsideBox, font=self._display['fontLink'], cursor="hand2")
self.UpdateLbl.bind('<1>', self._updateClick)
self.UpdateGo = ClickableImage(self.UpdateInsideBox, "page_go.png",
self._updateClick, update_bg, "upgo0", _("Update now..."))
self.UpdateCancel.grid(row=0,column=0,padx=5,pady=5,sticky=W)
self.UpdateLbl.grid(row=0,column=1,padx=5,pady=5,sticky=W)
self.UpdateGo.grid(row=0,column=2,padx=5,pady=5,sticky=W)
self._theme_updateBox()
def _theme_updateBox(self):
update_bg=self._display['update#']
self.UpdateEmptyBox.config(bg=self._bg)
self.UpdateInsideBox.config(bg=update_bg)
self.UpdateCancel.config(bg=update_bg, text=_("Cancel"))
self.UpdateLbl.config(text=_("A new Pwytter release is available. You should upgrade now !"),
bg=update_bg)
self.UpdateGo.config(bg=update_bg, text=_("Update now..."))
def _create_parameterBox(self, aParent):
param_bg=self._display['param#']
self.ParamEmpyBox = Frame(aParent)
self.ParamInsideBox = Frame(aParent, width=500)
self.ParamCancel = ClickableImage(self.ParamInsideBox, \
"cross.png", self._hideParameters,
param_bg,"parcancel", _('Cancel'))
self.CreateAccountLbl=Label(self.ParamInsideBox, font=self._display['fontLink'],
cursor="hand2")
self.UserLbl=Label(self.ParamInsideBox)
self.UserEntry = Entry(self.ParamInsideBox,textvariable=self.userVar)
self.PasswordLbl=Label(self.ParamInsideBox)
self.PasswordEntry = Entry(self.ParamInsideBox, textvariable=self.passwordVar,
show='*')
self.RefreshLbl=Label(self.ParamInsideBox)
self.refreshEntry = Entry(self.ParamInsideBox, textvariable=self.refreshVar)
self.LinesLbl=Label(self.ParamInsideBox)
self.LinesEntry = Entry(self.ParamInsideBox, textvariable=self.linesVar)
self.BtnBox=Frame(self.ParamInsideBox)
self.ApplyBtn=Button(self.BtnBox, command=self._saveParameters)
self.ThemeLbl=Label(self.ParamInsideBox)
self.themeVar = StringVar(self.ParamInsideBox)
self.themeVar.set(self.Theme.themeName) # default value
self.ThemeBox = OptionMenu(self.ParamInsideBox, self.themeVar, *self.Theme.themeList)
# self.ThemeBox.configure(indicatoron=0, compound='right', image=self._arrow)
self.LanguageLbl=Label(self.ParamInsideBox)
self.languageVar = StringVar(self.ParamInsideBox)
self.languageVar.set(self._currentLanguage) # default value
sorted_languages= self._languages.keys()
sorted_languages.sort()
self._arrow = ImageTk.PhotoImage(imagefromfile("arrow_down.png"))
self.LanguageResultLbl=Label(self.ParamInsideBox, textvariable=self.languageVar,
compound='right', image=self._arrow, cursor = 'hand2',
bd=1, relief="raised")
self.LanguageMenu = Menu(rootTk, tearoff=0)
for lang in sorted_languages:
if sys.platform == "darwin":
self.LanguageMenu.add_radiobutton(label=lang, variable=self.languageVar)
else:
self._languages[lang]['flag_image'] = ImageTk.PhotoImage(imagefromfile(self._languages[lang]['flag']))
self.LanguageMenu.add_radiobutton(label=lang, compound='left',
image=self._languages[lang]['flag_image'],
variable=self.languageVar)
self.LanguageResultLbl.bind('<1>', self._languagePopupClick)
self._theme_parameterBox()
self.ParamCancel.grid(row=0,column=0,padx=5,pady=5,sticky=NW)
self.CreateAccountLbl.bind('<1>', self._createAccountClick)
self.CreateAccountLbl.grid(row=0,column=1, columnspan=3,padx=5,pady=5)
self.UserLbl.grid(row=1,column=0,padx=5,pady=5,sticky=W)
self.UserEntry.grid(row=1, column=1,padx=5,pady=5)
self.PasswordLbl.grid(row=1,column=2,padx=5,pady=5,sticky=W)
self.PasswordEntry.grid(row=1, column=3,padx=5,pady=5)
self.RefreshLbl.grid(row=2,column=0,padx=5,pady=5,sticky=W)
self.refreshEntry.grid(row=2, column=1,padx=5,pady=5)
self.LinesLbl.grid(row=2,column=2,padx=5,pady=5,sticky=W)
self.LinesEntry.grid(row=2, column=3,padx=5,pady=5)
self.ThemeLbl.grid(row=3, column=0, padx=5, pady=5, sticky=W)
self.ThemeBox.grid(row=3, column=1, padx=5, pady=5, sticky=W)
self.LanguageLbl.grid(row=3, column=2, padx=5, pady=5, sticky=W)
self.LanguageResultLbl.grid(row=3, column=3, padx=5, pady=5, ipadx=2, sticky=W)
self.BtnBox.grid(row=4, column=3, columnspan=4, sticky=EW)
self.ApplyBtn.pack(padx=5,pady=5,side="right")
def _theme_parameterBox(self):
param_bg=self._display['param#']
self.ParamEmpyBox.config(bg=self._bg)
self.ParamInsideBox.config(bg=param_bg)
self.ParamCancel.config(bg=param_bg,text=_('Cancel'))
self.CreateAccountLbl.config(text= _("Click here to create a Free Twitter Account..."),
bg=param_bg, fg=self._display['text#'])
self.UserLbl.config(text=_("User"), bg=param_bg)
self.PasswordLbl.config(text=_("Password"), bg=param_bg)
self.RefreshLbl.config(text=_("Refresh (s)"), bg=param_bg)
self.LinesLbl.config(text=_("Lines"), bg=param_bg)
self.BtnBox.config(bg=param_bg)
self.ApplyBtn.config(text=_("Apply"))
self.ThemeLbl.config(text=_("Theme"), bg=param_bg)
self.LanguageLbl.config(text=_("Language"), bg=param_bg)
def _showParameters(self,par=None):
self.userVar.set(self._params['user'])
self.passwordVar.set(self._params['password'])
self.refreshVar.set(self._params['refresh_rate'])
self.linesVar.set(self._params['nb_lines'])
self.ParamEmpyBox.pack_forget()
self.ParamInsideBox.pack(expand=1,pady=2)
def _hideParameters(self,par=None):
self.ParamInsideBox.pack_forget()
self.ParamEmpyBox.pack()
def _saveParameters(self,par=None):
self._params['user'] = self.userVar.get()
self._params['password'] = self.passwordVar.get()
self._params['refresh_rate'] = self.refreshVar.get()
self._params['nb_lines']= self.linesVar.get()
self._params['theme']= self.themeVar.get()
self._params['language']= self.languageVar.get()
self._params.writeToXML()
self._applyParameters()
self._hideParameters()
self._theme_widgets()
self._theme_parameterBox()
self._theme_RefreshBox()
self._theme_friendsBox()
self._theme_updateBox()
self._refresh_mySelfBox()
self.manualRefresh()
def _create_Line(self, aParent, i):
linecolor = self._display['line#']
aLine={}
aLine['Box'] = Frame(aParent, highlightthickness=1, padx=10)
aLine['ImageRef'] = ImageTk.PhotoImage("RGB",(48,48))
aLine['Image'] = Label(aLine['Box'],image=aLine['ImageRef'], \
name="imag"+str(i), cursor="hand2")
aLine['ImageHint']= tkBalloon.Balloon(aLine['Image'])
aLine['NameBox'] = Frame(aLine['Box'])
aLine['Name'] = Label(aLine['NameBox'],text="...", name="name"+str(i),
font=self._display['fontName'], cursor="hand2")
aLine['NameHint'] = tkBalloon.Balloon(aLine['Name'])
aLine['Time'] = Label(aLine['NameBox'],text="...", justify="left")
aLine['IconBox'] = Frame(aLine['Box'], bg=linecolor)
aLine['Direct'] = ClickableImage(aLine['IconBox'], \
"arrow_right.png", self._showDirectMessage, linecolor,"drct"+str(i),\
_('Direct Message...'))
aLine['DirectInvalid'] = ClickableImage(aLine['IconBox'], \
"arrow_nb.png", None, linecolor,"drci"+str(i))
aLine['Favorite'] = ClickableImage(aLine['IconBox'], \
"asterisk_yellow.png", self._unsetFavoriteClick, linecolor,"unfa"+str(i),
_("UnFavorite"))
aLine['FavoriteGray'] = ClickableImage(aLine['IconBox'], \
"asterisk_nb.png", self._setFavoriteClick, linecolor,"favo"+str(i),
_("Favorite"))
aLine['UserUrl'] = ClickableImage(aLine['IconBox'], \
"world_go.png", self._userUrlClick, linecolor,"uurl"+str(i))
aLine['UserUrlHint']= tkBalloon.Balloon(aLine['UserUrl'])
aLine['UserUrlInvalid']= ClickableImage(aLine['IconBox'], \
"world_nb.png", None, linecolor,"iurl"+str(i))
aLine['Reply'] = ClickableImage(aLine['IconBox'], \
"arrow_undo.png", self._replyToMessage, linecolor,"repl"+str(i),
_('Reply to this message...'))
aLine['Retweet'] = ClickableImage(aLine['IconBox'], \
"arrow_switch.png", self._retweetMessage, linecolor,"rt"+str(i),
_('Retweet this message...'))
aLine['Msg'] = Label(aLine['Box'],text="...", name=str(i),\
font=self._display['fontMsg'],\
width=self._display['widthMsg'],
height=3)
aLine['MsgHint']= tkBalloon.Balloon(aLine['Msg'])
directColor = self._display['directMsg#']
aLine['DirectBox'] = Frame(aLine['Box'], padx=3, pady=2)
aLine['DirectBoxEmpty'] = Frame(aLine['Box'])
aLine['DirectCancel'] = ClickableImage(aLine['DirectBox'], \
"cross.png", self._hideDirectMessage, \
directColor,"dcan"+str(i),_('Cancel'))
aLine['DirectEdit'] = Entry(aLine['DirectBox'], width=self._display['widthDirectMsg'],\
textvariable=self.directText, validate="key", \
bd=0, name="dedi"+str(i))
aLine['DirectSend'] = ClickableImage(aLine['DirectBox'], \
"comment.png", self._sendDirectMessage, directColor,\
"dsen"+str(i), _('Send'))
aLine['DirectCancel'].grid(row=0,column=0, sticky='W',padx=1)
aLine['DirectEdit'].grid(row=0,column=1, padx=1)
aLine['DirectSend'].grid(row=0,column=2, sticky='E',padx=1)
aLine['Image'].bind('<1>', self._nameClick)
aLine['Image'].grid(row=0,column=0,rowspan=2, sticky='NW',padx=1,pady=2)
aLine['NameBox'].grid(row=0,column=1, sticky='W')
aLine['Name'].bind('<1>', self._nameClick)
aLine['Name'].grid(row=0,column=0, sticky='W',padx=1)
aLine['Time'].grid(row=0,column=1, sticky='W')
aLine['IconBox'].grid(row=0,column=2, sticky='E')
aLine['Reply'].grid(row=0,column=0, rowspan=1, sticky='E')
aLine['Retweet'].grid(row=0,column=1, rowspan=1, sticky='W')
aLine['Direct'].grid_forget()
aLine['DirectInvalid'].grid(row=0,column=3, rowspan=1, sticky='E')
aLine['Favorite'].grid_forget()
aLine['FavoriteGray'].grid(row=0,column=2, rowspan=1, sticky='E')
aLine['UserUrl'].grid(row=0,column=4, sticky='E')
aLine['UserUrl'].grid_forget()
aLine['UserUrlInvalid'].grid(row=0,column=4, sticky='E')
aLine['Msg'].grid(row=1,column=1,columnspan=2,rowspan=1,padx=1, sticky="news")
aLine['Box'].grid(row=i,padx=0, pady=1, ipadx=0, ipady=0, sticky="news")
aLine['DirectBox'].grid_forget()
aLine['DirectBoxEmpty'].grid(row=2,column=0,columnspan=3,rowspan=1, sticky='W',padx=1)
self._theme_Line(aLine, i)
return aLine
def _theme_Line(self, aLine, index, type='standard'):
if index==self.pos:
border = self._display['text#'] #self._display['1stLine#']
else:
border=self._display['twitEdit#']
linecolor = self._display['line#']
if type == 'direct':
linecolor = self._display['directLine#']
if type == 'reply':
linecolor = self._display['replyLine#']
#AP
aLine['Box'].config(bg=linecolor, highlightbackground=border )
aLine['NameBox'].config(bg=linecolor)
aLine['Name'].config(bg=linecolor, fg=self._display['text#'])
aLine['Time'].config(bg=linecolor,fg=self._display['time#'])
aLine['IconBox'].config(bg=linecolor)
aLine['Direct'].config(bg=linecolor,text=_('Direct Message...'))
aLine['DirectInvalid'].config(bg=linecolor)
aLine['Favorite'].config(bg=linecolor)
aLine['FavoriteGray'].config(bg=linecolor)
aLine['UserUrl'].config(bg=linecolor)
aLine['UserUrlInvalid'].config(bg=linecolor)
aLine['Reply'].config(bg=linecolor)
aLine['Retweet'].config(bg=linecolor)
aLine['Msg'].config(bg=linecolor, fg=self._display['message#'])
directColor = self._display['directMsg#']
aLine['DirectBox'].config(bg=directColor)
aLine['DirectBoxEmpty'].config(bg=linecolor)
aLine['DirectCancel'].config(bg=directColor, text=_('Cancel'))
aLine['DirectEdit'].config(bg=self._bg, fg=self._display['text#'])
aLine['DirectSend'].config(bg=directColor, text=_('Send'))
def _refresh_lines(self, par=None):
self._imagesLoaded=True
i=0
for i in range(min(self._TwitLines,len(self.tw.texts))):
j = i + self.offset
if i+1>len(self.Lines) :
self.Lines.append(self._create_Line(self.LinesBox, i))
self._theme_Line(self.Lines[i], i, self.tw.texts[j]['type'])
name = self.tw.texts[j]["name"]
loaded, aImage= self.tw.imageFromCache(name)
self._imagesLoaded = self._imagesLoaded and loaded
try:
self.Lines[i]['ImageRef'].paste(aImage, (0,0,20,20))
except:
print "error pasintg image:", name
self.Lines[i]['Name']["text"]= name
self.Lines[i]['ImageHint'].settext("http://twitter.com/"+name)
self.Lines[i]['NameHint'].settext("http://twitter.com/"+name)
self.Lines[i]['Time']["text"]= self.tw.texts[j]["time"]
if name==self.MyName["text"]:
self.Lines[i]['Direct'].grid_forget()
self.Lines[i]['DirectInvalid'].grid(row=0,column=3, rowspan=1, sticky='W')
else:
self.Lines[i]['DirectInvalid'].grid_forget()
self.Lines[i]['Direct'].grid(row=0,column=3, rowspan=1, sticky='W')
self.Lines[i]['Msg']["text"]=textwrap.fill(self.tw.texts[j]["msgunicode"], 70, break_long_words=True)
if self.tw.texts[j]["url"]<>'' :
self.Lines[i]['Msg'].bind('<1>', self._urlClick)
self.Lines[i]['Msg']["cursor"] = 'hand2'
self.Lines[i]['Msg']["fg"] = self._display['messageUrl#']
self.Lines[i]['MsgHint'].settext(self.tw.texts[j]["url"])
self.Lines[i]['MsgHint'].enable()
else:
self.Lines[i]['Msg'].bind('<1>', None)
self.Lines[i]['Msg']["cursor"] = ''
self.Lines[i]['Msg']["fg"] = self._display['message#']
self.Lines[i]['MsgHint'].disable()
if self.tw.texts[j]["user_url"] == '':
self.Lines[i]['UserUrl'].bind('<1>', None)
self.Lines[i]['UserUrl']["cursor"] = ''
self.Lines[i]['UserUrl'].grid_forget()
self.Lines[i]['UserUrlInvalid'].grid(row=0, column=4, sticky='E')
else:
self.Lines[i]['UserUrl'].bind('<1>', self._userUrlClick)
self.Lines[i]['UserUrl']["cursor"] = 'hand2'
self.Lines[i]['UserUrlHint'].settext(self.tw.texts[j]["user_url"])
self.Lines[i]['UserUrlInvalid'].grid_forget()
self.Lines[i]['UserUrl'].grid(row=0, column=4, sticky='E')
self.Lines[i]['UserUrl'].grid()
self._imagesLoaded = self._imagesLoaded \
and self.tw.texts[j]["favorite_updated"]
if self.tw.texts[j]["favorite"]:
self.Lines[i]['FavoriteGray'].grid_forget()
self.Lines[i]['Favorite'].grid(row=0,column=2, rowspan=1, sticky='E')
else:
self.Lines[i]['Favorite'].grid_forget()
self.Lines[i]['FavoriteGray'].grid(row=0,column=2, rowspan=1, sticky='E')
self.Lines[i]['Box'].grid(row=i,sticky=W,padx=0, pady=1, ipadx=1, ipady=1)
for i in range(i+1,len(self.Lines)):
self.Lines[i]['Box'].grid_forget()
def _createFriendImage(self, aParent, index, type):
aFriend={}
aFriend['ImageRef'] = ImageTk.PhotoImage("RGB",(20,20))
c=self._display['friendcolumn']
if type=="friend":
aFriend['Image'] = Label(aParent,image=aFriend['ImageRef'], \
name="frie"+str(index), cursor="hand2")
aFriend['ImageHint']= tkBalloon.Balloon(aFriend['Image'])
self.FriendImages.append(aFriend)
else:
aFriend['Image'] = Label(aParent,image=aFriend['ImageRef'], \
name="foll"+str(index), cursor="hand2")
aFriend['ImageHint']= tkBalloon.Balloon(aFriend['Image'])
self.FollowerImages.append(aFriend)
aFriend['Image'].grid(row=1+int(index/c), column=index-(int(index/c)*c), padx=1, pady=1)
return aFriend
def _create_friendsBox(self, aParent):
self.friendsEmptyBox = Frame(aParent)
self.friendsInsideBox = Frame(aParent)
self.FriendImages=[]
self.FriendTitle = Label(self.friendsInsideBox)
for i in range(2):
self._createFriendImage(self.friendsInsideBox,i,"friend")
self.followersEmptyBox = Frame(aParent)
self.followersInsideBox = Frame(aParent)
self.FollowerImages=[]
self.FollowerTitle = Label(self.followersInsideBox)
for i in range(2):
self._createFriendImage(self.followersInsideBox,i,"follower")
self._theme_friendsBox()
self.FriendTitle.grid(row=0,column=0,columnspan=self._display['friendcolumn'])
self.FollowerTitle.grid(row=0,column=0,columnspan=self._display['friendcolumn'])
def _theme_friendsBox(self):
self.friendsEmptyBox.config(bg=self._bg)
self.friendsInsideBox.config(bg=self._bg)
self.FriendTitle.config(text=_("Following"), bg=self._bg, fg=self._display['text#'])
self.followersEmptyBox.config(bg=self._bg)
self.followersInsideBox.config(bg=self._bg)
self.FollowerTitle.config(text=_("Followers"), bg=self._bg, fg=self._display['text#'])
def _refresh_friendsBox(self):
self._imagesFriendsLoaded = True
try:
self._imagesFriendsLoaded = self._imagesFriendsLoaded and self.tw.getFriends()
i=0
for fname in self.tw.Friends[:30]:
if i+1>len(self.FriendImages) :
self._createFriendImage(self.friendsInsideBox,i, "friend")
loaded, aImage= self.tw.imageFromCache(fname)
self._imagesFriendsLoaded = self._imagesFriendsLoaded and loaded
try :
self.FriendImages[i]['ImageRef'].paste(aImage, (0,0,20,20))
except:
print "error pasting friends images:",fname
self.FriendImages[i]['ImageHint'].settext("http://twitter.com/"+fname)
self.FriendImages[i]['Image'].bind('<1>', self._friendClick)
c=self._display['friendcolumn']
self.FriendImages[i]['Image'].grid(row=1+int(i/c), column=i-(int(i/c)*c), padx=1, pady=1)
i=i+1
for i in range(i,len(self.FriendImages)):
self.FriendImages[i]['Image'].grid_forget()
except Exception,e :
print str(e),"-> Can't get friends"
try:
self._imagesFriendsLoaded = self._imagesFriendsLoaded and self.tw.getFollowers()
i=0
for fname in self.tw.Followers[:30]:
if i+1>len(self.FollowerImages) :
self._createFriendImage(self.followersInsideBox,i, "follower")
loaded, aImage= self.tw.imageFromCache(fname)
self._imagesFriendsLoaded = self._imagesFriendsLoaded and loaded
try :
self.FollowerImages[i]['ImageRef'].paste(aImage, (0,0,20,20))
except:
print "error pasting friends images:",fname
self.FollowerImages[i]['ImageHint'].settext("http://twitter.com/"+fname)
self.FollowerImages[i]['Image'].bind('<1>', self._friendClick)
c=self._display['friendcolumn']
self.FollowerImages[i]['Image'].grid(row=1+int(i/c), column=i-(int(i/c)*c), padx=1, pady=1)
i=i+1
for i in range(i,len(self.FollowerImages)):
self.FollowerImages[i]['Image'].grid_forget()
except Exception,e :
print str(e),"-> Can't get followers"
def _showFriends(self,par=None):
self.friendsEmptyBox.pack_forget()
self.friendsInsideBox.pack(expand=1,padx=2)
self.followersEmptyBox.pack_forget()
self.followersInsideBox.pack(expand=1,padx=2)
self.ShowFriends.grid_forget()
self.HideFriends.grid(row=0,column=1, sticky="E")
def _hideFriends(self,par=None):
self.friendsInsideBox.pack_forget()
self.friendsEmptyBox.pack()
self.followersInsideBox.pack_forget()
self.followersEmptyBox.pack(expand=1,padx=2)
self.HideFriends.grid_forget()
self.ShowFriends.grid(row=0,column=1, sticky="E")
def _showUpdatePwytter(self,par=None):
self.UpdateEmptyBox.grid_forget()
self.UpdateInsideBox.grid(row=0,column=0)
def _hideUpdate(self,par=None):
self.UpdateInsideBox.grid_forget()
self.UpdateEmptyBox.grid(row=0,column=0)
def _showDirectMessage(self,par=None):
# There's no offset here.
lineIndex= int(par.widget.winfo_name()[4:])
self.Lines[lineIndex]['DirectBoxEmpty'].grid_forget()
self.Lines[lineIndex]['DirectBox'].grid(row=2,column=0,columnspan=3,rowspan=1, sticky='W',padx=1)
def _hideDirectMessage(self,par=None):
# There's no offset here.
lineIndex= int(par.widget.winfo_name()[4:])
self.Lines[lineIndex]['DirectBox'].grid_forget()
self.Lines[lineIndex]['DirectBoxEmpty'].grid(row=2,column=0,columnspan=3,rowspan=1, sticky='W',padx=1)
def _replyToMessage(self,par=None):
lineIndex= int(par.widget.winfo_name()[4:]) + self.offset
self.twitText.set('@'+self.tw.texts[lineIndex]["name"]+" ")
self.TwitEdit.icursor(140)
self.TwitEdit.focus_set()
def _retweetMessage(self,par=None):
lineIndex= int(par.widget.winfo_name()[2:]) + self.offset
self.twitText.set('RT:@'+self.tw.texts[lineIndex]["name"]+ " "+self.tw.texts[lineIndex]["msg"])
self.TwitEdit.icursor(140)
self.TwitEdit.focus_set()
def _sendDirectMessage(self,par=None):
print "start dmthread in threads"
thread.start_new_thread(self._sendDirectMessage_thread,(par))
def _sendDirectMessage_thread(self,par=None):
self.threadlock.acquire()
print "start dmthread running"
self._busy.set()
try:
lineIndex= int(par.widget.winfo_name()[4:]) + self.offset
try:
print self.tw.sendDirectMessage(self.tw.texts[lineIndex]["name"], self.directText.get())
except Exception,e :
print str(e),'-> error sending direct msg:',self.directText.get(),'to',self.tw.texts[lineIndex]["name"]
self._hideDirectMessage(par)
finally:
self._busy.reset()
self.threadlock.release()
print "finish dmthread in threads"
def _create_widgets(self):
self.MainZone = Frame(self, highlightthickness=0)
self._create_mySelfBox(self.MainZone)
self._create_RefreshBox(self.MainZone)
self.ParameterBox = Frame(self.MainZone)
self._create_parameterBox(self.ParameterBox)
self.LinesBox= Frame(self.MainZone, takefocus=True,highlightthickness=1, padx=0, pady=0)
self.LinesBox.bind('<j>', self.go_down)
self.LinesBox.bind('<k>', self.go_up)
self.LinesBox.bind('<Button-4>', self.go_up)
self.LinesBox.bind('<Button-5>', self.go_down)
#AP
#self.LinesBox.protocol('WM_TAKE_FOCUS', self.take_focus)
#self.LinesBox.bind('WM_TAKE_FOCUS', self.take_focus)
#self.LinesBox.pack()
self.Lines=[]
for i in range(self._TwitLines):
self.Lines.append(self._create_Line(self.LinesBox, i))
self.EditParentBox = Frame(self.MainZone, bg=self._bg, highlightthickness=0)
self.RemainCar = Label(self.EditParentBox,text="...")
self.editBox = Frame(self.EditParentBox,)
self.TwitEdit = Entry(self.editBox, width=self._display['widthTwit'],\
textvariable=self.twitText, bd=0)
self.Send = ClickableImage(self.editBox, "comment.png",
self.sendTwit,self._bg, "send0",_("Send"))
self.UpdateZone = Frame(self)
self._create_updateBox(self.UpdateZone)
self.FriendZone = Frame(self)
self._create_friendsBox(self.FriendZone)
self.MySelfBox.grid(row=0, column=0, padx=2, ipadx=6, pady=2, sticky="W")
self.refreshBox.grid(row=0, column=1, sticky="SE")
self.ParameterBox.grid(row=1,column=0,columnspan=2)
self._hideParameters()
self.LinesBox.grid(row=3, column=0,columnspan=2, sticky="news")
self.RemainCar.pack(padx=0)
self.TwitEdit.pack(side="left",padx=2, ipadx=2, ipady=2)
self.Send.pack(side="left", padx=2, ipadx=1, ipady=1)
self.TwitEdit.bind("<Return>", self.sendTwit)
self.TwitEdit.bind('<Button-1>',self.emptyTwit)
self.twitText.trace("w", self.editValidate)
self.editBox.pack(side=BOTTOM, anchor=S)
self.EditParentBox.grid(row=5,column=0,columnspan=2, pady=2, sticky="news")
self.UpdateZone.grid(row=0,column=0, sticky="news")
self.MainZone.grid(row=1,column=0,sticky="news")
self.FriendZone.grid(row=1,column=1, sticky="news")
self._hideFriends()
self._theme_widgets()
def _theme_widgets(self):
self.MainZone.config(bg=self._bg)
self.ParameterBox.config(bg=self._bg)
self.LinesBox.config(bg=self._bg)
self.EditParentBox.config(bg=self._bg)
self.RemainCar.config(bg=self._bg, fg=self._display['text#'] )
self.editValidate()
self.editBox.config(bg=self._bg)
self.LinesBox.config(highlightcolor=self._display['update#'], highlightbackground=self._bg)
self.TwitEdit.config(bg=self._display['twitEdit#'], fg=self._display['text#'],highlightcolor=self._display['text#'], highlightbackground=self._display['twitEdit#'] )
self.Send.config(bg=self._bg, text=_("Send"))
self.UpdateZone.config(bg=self._bg)
self.FriendZone.config(bg=self._bg)
def _openweb(self,url):
try :
webbrowser.open(url)
except Exception,e :
print str(e),'-> Cannot open Browser with url:',url
def _homeclick(self,par=None):
self._openweb('http://www.pwytter.com')
def _userClick(self,par=None):
self._openweb(self.tw.me.url.encode('latin-1'))
def _createAccountClick(self,par=None):
self._openweb('https://twitter.com/signup')
def _updateClick(self,par=None):
self._openweb('http://www.pwytter.com/download')
def _urlClick(self,par=None):
lineIndex= int(par.widget.winfo_name()) + self.offset
self._openweb(self.tw.texts[lineIndex]["url"])
def _nameClick(self,par=None):
lineIndex= int(par.widget.winfo_name()[4:]) + self.offset
self._openweb("http://twitter.com/"+self.tw.texts[lineIndex]["name"])
def _friendClick(self,par=None):
friendIndex= int(par.widget.winfo_name()[4:]) + self.offset
self._openweb(self.FriendImages[friendIndex]['ImageHint'].gettext())
def _userUrlClick(self,par=None):
lineIndex= int(par.widget.winfo_name()[4:]) + self.offset
userurl = self.tw.texts[lineIndex]["user_url"]
if userurl != "": self._openweb(userurl)
def _setFavoriteClick(self,par=None):
lineIndex= int(par.widget.winfo_name()[4:]) + self.offset
print "Set Favo id",self.tw.texts[lineIndex]["id"]
self.tw.createFavorite(self.tw.texts[lineIndex]["name"],
self.tw.texts[lineIndex]["id"])
self.tw.texts[lineIndex]["favorite"]=True
self._refresh_lines()
def _unsetFavoriteClick(self,par=None):
lineIndex= int(par.widget.winfo_name()[4:]) + self.offset
print "UnSet Favo id",self.tw.texts[lineIndex]["id"]
self.tw.destroyFavorite(self.tw.texts[lineIndex]["name"],
self.tw.texts[lineIndex]["id"])
self.tw.texts[lineIndex]["favorite"]=False
self._refresh_lines()
def _timeLineClick(self,event=None):
if event:
self.TimeLineMenu.post(event.x_root, event.y_root)
def _timeLineMenuClick(self,*dummy):
self._busy.set()
try:
self.tw.setTimeLine(self.timeLineVar.get())
print "Switch to Timeline:",self.tw.timeLineName()
self.TimeLine["text"] = _("Timeline: %s") %(self.tw.timeLineName())
self._refreshTwitZone()
finally:
self._busy.reset()
def _languagePopupClick(self,event=None):
if event:
self.LanguageMenu.post(event.x_root, event.y_root)
def _refreshTwitZone(self):
print "start refreshing in threads"
thread.start_new_thread(self._refreshTwitZone_thread,())
def _refreshTwitZone_thread(self):
self.threadlock.acquire()
print "start refreshing running"
timestr = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())
self.Time["text"]= timestr
try:
self.tw.refresh()
self._refresh_lines()
self.editValidate()
except Exception, e :
self.Time["text"]=textwrap.fill("Refresh error: "+timestr+" >> "+str(e), 50, break_long_words=True)
# finally:
# pass
print "finish refreshing in threads"
self.threadlock.release()
def timer(self):
try:
if time.time()-self._refreshTime >= self._refreshRate :
self._refreshTwitZone()
self._refreshTime = time.time()
if not self._imagesLoaded :
#self.tw.refresh()
self._refresh_lines()
if self._needToRefreshMe:
self._refresh_mySelfBox()
self._refresh_lines()
if not self._imagesFriendsLoaded :
self._refresh_friendsBox()
if not self._versionChecked :
self._refresh_version()
finally:
self.after(1000, self.timer)
def sendTwit(self,par=None):
print "start twit in threads"
thread.start_new_thread(self._sendTwit_thread,(par))
def _sendTwit_thread(self,par=None):
self.threadlock.acquire()
print "start twit running"
self._busy.set()
try:
self.tw.sendText(self.twitText.get())
self.twitText.set('')
self._refreshTwitZone()
self.editValidate()
finally:
self._busy.reset()
self.threadlock.release()
print "finish twit in threads"
def emptyTwit(self,par=None):
if self.twitText.get() == self._defaultTwitText:
self.twitText.set('')
def manualRefresh(self,par=None):
self._busy.set()
try:
self._refreshTwitZone()
finally:
self._busy.reset()
def editValidate(self, *dummy):
text = self.twitText.get()
actualLength=len(text)
if (actualLength>0) and (text[0]=='@') :
self.TwitEdit.config(bg= self._display['replyLine#'],
fg=self._display['text#'])
else:
self.TwitEdit.config(bg=self._display['twitEdit#'],
fg=self._display['text#'])
if actualLength>140:
self.twitText.set(self.twitText.get()[:140])
else:
self.RemainCar["text"] = _("%d character(s) left (%d tweets)") % ((140-actualLength), len(self.tw.texts))
def go_down(self, event):
#if self.pos >= len(self.Lines) -1:
# return
if self.pos >= self._TwitLines - 2 and self.offset < len(self.tw.texts) - self._TwitLines:
self.offset += 1
self.pos = self._TwitLines - 2
self._refresh_lines()
elif self.pos < self._TwitLines -1:
self.pos += 1
self._theme_Line(self.Lines[self.pos-1],self.pos-1)
self._theme_Line(self.Lines[self.pos],self.pos)
def go_up(self, event):
#if self.pos == 0:
# return
if self.pos == 1 and self.offset > 0:
self.offset -= 1
self._refresh_lines()
elif self.pos > 0:
self.pos -= 1
self._theme_Line(self.Lines[self.pos+1],self.pos+1)
self._theme_Line(self.Lines[self.pos],self.pos)
def _initTranslation():
"""Translation stuff : init locale and get text"""
gettext.install(APP_NAME)
if __name__ == "__main__":
print "Starting Pwytter..."
_initTranslation()
rootTk = Tk()
#rootTk.option_add("*Label*font","FreeSans 10 bold")
rootTk.title('Pwytter %s' % (__version__))
rootTk.resizable(width=600, height=30)
#AP rootTk.wm_iconbitmap(os.path.join("media",'pwytter.ico'))
if os.name == 'nt':
rootTk.iconbitmap(os.path.join("media",'pwytter.ico'))
s = pwSplashScreen.Splash(rootTk)
app = MainPanel(master=rootTk)
app.timer()
s.destroy()
app.mainloop()
| gpl-2.0 | 1,601,154,275,577,548,000 | 45.285444 | 173 | 0.567592 | false |
StamusNetworks/scirius | rules/backup.py | 1 | 7435 | """
Copyright(C) 2016, Stamus Networks
Written by Eric Leblond <[email protected]>
This file is part of Scirius.
Scirius is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Scirius is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Scirius. If not, see <http://www.gnu.org/licenses/>.
"""
from django.conf import settings
import tarfile
import tempfile
import shutil
import os
import sys
import json
from dbbackup.dbcommands import DBCommands
from dbbackup.storage.base import BaseStorage
from dbbackup.utils import filename_generate
from django.core.management import call_command
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.loader import MigrationLoader
DB_SERVERNAME = "scirius"
class SCBackupException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class SCOperation(object):
def get_migration_levels(self):
connection = connections[DEFAULT_DB_ALIAS]
loader = MigrationLoader(connection, ignore_no_migrations=True)
graph = loader.graph
app_names = sorted(loader.migrated_apps)
last_migrations = {}
for app_name in app_names:
shown = set()
for node in graph.leaf_nodes(app_name):
for plan_node in graph.forwards_plan(node):
if plan_node not in shown and plan_node[0] == app_name:
# Give it a nice title if it's a squashed one
title = plan_node[1]
if graph.nodes[plan_node].replaces:
title += " (%s squashed migrations)" % len(graph.nodes[plan_node].replaces)
# Mark it as applied/unapplied
if plan_node in loader.applied_migrations:
shown.add(plan_node)
last_migrations[app_name] = int(plan_node[1].split('_')[0])
else:
continue
connection.close()
return last_migrations
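    # get_migration_levels() maps each migrated app label to the number of its
    # latest applied migration, e.g. (hypothetical values):
    #   {'rules': 48, 'accounts': 12}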
def is_migration_level_lower(self, miglevel):
llevel = self.get_migration_levels()
for key in llevel:
            # removing an application is unlikely, so if miglevel doesn't have
            # the key, the stored backup is considered older
if key not in miglevel:
return True
if llevel[key] < miglevel[key]:
return False
return True
class SCBackup(SCOperation):
def __init__(self):
self.storage = BaseStorage.storage_factory()
self.servername = DB_SERVERNAME
def backup_git_sources(self):
# Create a tar of the git sources in the target directory
sys.stdout.write("%s in %s\n" % (settings.GIT_SOURCES_BASE_DIRECTORY, self.directory))
ts = tarfile.open(os.path.join(self.directory, 'sources.tar'), 'w')
call_dir = os.getcwd()
os.chdir(settings.GIT_SOURCES_BASE_DIRECTORY)
ts.add('.')
ts.close()
os.chdir(call_dir)
def backup_db(self):
database = settings.DATABASES['default']
self.dbcommands = DBCommands(database)
with open(os.path.join(self.directory, 'dbbackup'), 'w') as outputfile:
self.dbcommands.run_backup_commands(outputfile)
def backup_ruleset_middleware(self):
try:
__import__("%s.%s" % (settings.RULESET_MIDDLEWARE, 'backup'))
except ImportError:
return
probe_class = __import__(settings.RULESET_MIDDLEWARE)
probe_class.backup.backup(self.directory)
def write_migration_level(self):
last_migrations = self.get_migration_levels()
migfile = os.path.join(self.directory, 'miglevel')
with open(migfile, 'w') as miglevel:
miglevel.write(json.dumps(last_migrations))
def run(self):
self.directory = tempfile.mkdtemp()
self.write_migration_level()
self.backup_db()
self.backup_git_sources()
self.backup_ruleset_middleware()
# create tar archive of dir
call_dir = os.getcwd()
os.chdir(self.directory)
filename = filename_generate('tar.bz2', self.dbcommands.settings.database['NAME'], self.servername)
outputfile = tempfile.SpooledTemporaryFile()
ts = tarfile.open(filename, 'w:bz2', fileobj=outputfile)
for dfile in os.listdir('.'):
ts.add(dfile)
ts.close()
self.storage.write_file(outputfile, filename)
shutil.rmtree(self.directory)
os.chdir(call_dir)
class SCRestore(SCOperation):
def __init__(self, filepath=None):
self.storage = BaseStorage.storage_factory()
if filepath:
self.filepath = filepath
else:
self.filepath = self.storage.get_latest_backup()
self.servername = DB_SERVERNAME
def restore_git_sources(self):
sys.stdout.write("Restoring to %s from %s\n" % (settings.GIT_SOURCES_BASE_DIRECTORY, self.directory))
ts = tarfile.open(os.path.join(self.directory, 'sources.tar'), 'r')
shutil.rmtree(settings.GIT_SOURCES_BASE_DIRECTORY, ignore_errors=True)
if not os.path.exists(settings.GIT_SOURCES_BASE_DIRECTORY):
os.mkdir(settings.GIT_SOURCES_BASE_DIRECTORY)
os.chdir(settings.GIT_SOURCES_BASE_DIRECTORY)
ts.extractall()
def restore_db(self):
database = settings.DATABASES['default']
self.dbcommands = DBCommands(database)
filepath = os.path.join(self.directory, 'dbbackup')
with open(filepath, 'r') as inputfile:
self.dbcommands.run_restore_commands(inputfile)
def restore_ruleset_middleware(self):
try:
__import__("%s.%s" % (settings.RULESET_MIDDLEWARE, 'backup'))
except ImportError:
return
probe_class = __import__(settings.RULESET_MIDDLEWARE)
probe_class.backup.restore(self.directory)
def test_migration_level(self):
miglevel = None
with open(os.path.join(self.directory, 'miglevel'), 'r') as migfile:
miglevel = json.load(migfile)
return self.is_migration_level_lower(miglevel)
def run(self):
# extract archive in tmp directory
inputfile = self.storage.read_file(self.filepath)
call_dir = os.getcwd()
ts = tarfile.open(self.filepath, 'r', fileobj=inputfile)
tmpdir = tempfile.mkdtemp()
os.chdir(tmpdir)
ts.extractall()
ts.close()
self.directory = tmpdir
if self.test_migration_level() is False:
raise SCBackupException(
"Backup is newer than local Scirius version, please update local instance and apply migrations."
)
self.restore_git_sources()
self.restore_db()
# Apply upgrades
call_command('migrate', '--noinput')
self.restore_ruleset_middleware()
shutil.rmtree(tmpdir)
os.chdir(call_dir)
| gpl-3.0 | 913,863,219,099,019,300 | 34.745192 | 112 | 0.627841 | false |
fulfilio/trytond-shipping-ups | party.py | 1 | 12823 | # -*- encoding: utf-8 -*-
"""
Customize party addresses into the format expected by the UPS API.
"""
import re
# Remove when we are on python 3.x :)
from orderedset import OrderedSet
from lxml import etree
from logbook import Logger
from ups.worldship_api import WorldShip
from ups.shipping_package import ShipmentConfirm
from ups.base import PyUPSException
from trytond.pool import Pool, PoolMeta
from trytond.transaction import Transaction
__all__ = ['Address']
__metaclass__ = PoolMeta
digits_only_re = re.compile(r'\D+')
logger = Logger('trytond_ups')
class Address:
'''
Address
'''
__name__ = "party.address"
@classmethod
def __setup__(cls):
super(Address, cls).__setup__()
cls._error_messages.update({
'ups_field_missing':
'%s is missing in %s.'
})
def _get_ups_address_xml(self):
"""
Return Address XML
"""
if not all([self.street, self.city, self.country]):
self.raise_user_error("Street, City and Country are required.")
if self.country.code in ['US', 'CA'] and not self.subdivision:
self.raise_user_error(
"State is required for %s" % self.country.code
)
if self.country.code in ['US', 'CA', 'PR'] and not self.zip:
# If Shipper country is US or Puerto Rico, 5 or 9 digits is
# required. The character - may be used to separate the first five
# digits and the last four digits. If the Shipper country is CA,
# then the postal code is required and must be 6 alphanumeric
# characters whose format is A#A#A# where A is an uppercase letter
# and # is a digit. For all other countries the postal code is
# optional and must be no more than 9 alphanumeric characters long.
self.raise_user_error("ZIP is required for %s" % self.country.code)
vals = {
'AddressLine1': self.street[:35], # Limit to 35 Char
'City': self.city[:30], # Limit 30 Char
'CountryCode': self.country.code,
}
if self.streetbis:
vals['AddressLine2'] = self.streetbis[:35] # Limit to 35 char
if self.subdivision:
# TODO: Handle Ireland Case
vals['StateProvinceCode'] = self.subdivision.code[3:]
if self.zip:
vals['PostalCode'] = self.zip
return ShipmentConfirm.address_type(**vals)
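    # The keyword arguments above map onto the UPS Address fields
    # (AddressLine1/AddressLine2, City, StateProvinceCode, PostalCode,
    # CountryCode); ShipmentConfirm.address_type wraps them into the Address
    # XML element used by the ShipmentConfirm request.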
def to_ups_from_address(self):
'''
Converts party address to UPS `From Address`.
:return: Returns instance of FromAddress
'''
Company = Pool().get('company.company')
vals = {}
if not self.party.phone and not getattr(self, 'phone'):
self.raise_user_error(
"ups_field_missing",
error_args=('Phone no.', '"from address"')
)
company_id = Transaction().context.get('company')
if not company_id:
self.raise_user_error(
"ups_field_missing",
error_args=('Company', 'context')
)
company_party = Company(company_id).party
if getattr(self, 'phone'):
phone = getattr(self, 'phone')
else:
phone = self.party.phone
vals = {
'CompanyName': company_party.name,
'AttentionName': self.name or self.party.name,
'TaxIdentificationNumber': company_party.identifiers and
company_party.identifiers[0].code or '',
'PhoneNumber': digits_only_re.sub('', phone),
}
fax = self.party.fax
if fax:
vals['FaxNumber'] = fax
# EMailAddress
email = self.party.email
if email:
vals['EMailAddress'] = email
return ShipmentConfirm.ship_from_type(
self._get_ups_address_xml(), **vals)
def to_ups_to_address(self):
'''
Converts party address to UPS `To Address`.
:return: Returns instance of ToAddress
'''
party = self.party
tax_identification_number = ''
if party.identifiers:
tax_identification_number = party.identifiers[0].code
elif hasattr(party, 'tax_exemption_number') and \
party.tax_exemption_number:
tax_identification_number = party.tax_exemption_number
vals = {
'CompanyName': self.name or party.name,
'TaxIdentificationNumber': tax_identification_number,
'AttentionName': self.name or party.name,
}
if getattr(self, 'phone'):
phone = getattr(self, 'phone')
else:
phone = party.phone
if phone:
vals['PhoneNumber'] = digits_only_re.sub('', phone)
fax = party.fax
if fax:
vals['FaxNumber'] = fax
# EMailAddress
email = party.email
if email:
vals['EMailAddress'] = email
# TODO: LocationID is optional
return ShipmentConfirm.ship_to_type(self._get_ups_address_xml(), **vals)
def to_ups_shipper(self, carrier):
'''
Converts party address to UPS `Shipper Address`.
:return: Returns instance of ShipperAddress
'''
Company = Pool().get('company.company')
vals = {}
if not self.party.phone and not getattr(self, 'phone'):
self.raise_user_error(
"ups_field_missing",
error_args=('Phone no.', '"Shipper Address"')
)
company_id = Transaction().context.get('company')
if not company_id:
self.raise_user_error(
"ups_field_missing", error_args=('Company', 'context')
)
company_party = Company(company_id).party
if getattr(self, 'phone'):
phone = getattr(self, 'phone')
else:
phone = self.party.phone
vals = {
'CompanyName': company_party.name,
'TaxIdentificationNumber': company_party.identifiers and
company_party.identifiers[0].code or '',
'Name': self.name or self.party.name,
'AttentionName': self.name or self.party.name,
'PhoneNumber': digits_only_re.sub('', phone),
'ShipperNumber': carrier.ups_shipper_no,
}
fax = self.party.fax
if fax:
vals['FaxNumber'] = fax
# EMailAddress
email = self.party.email
if email:
vals['EMailAddress'] = email
return ShipmentConfirm.shipper_type(
self._get_ups_address_xml(),
**vals
)
def _ups_address_validate(self):
"""
Validates the address using the PyUPS API.
.. tip::
This method is not intended to be called directly. It is
automatically called by the address validation API of
trytond-shipping module.
"""
Subdivision = Pool().get('country.subdivision')
Address = Pool().get('party.address')
PartyConfig = Pool().get('party.configuration')
config = PartyConfig(1)
carrier = config.default_validation_carrier
if not carrier:
# TODO: Make this translatable error message
self.raise_user_error(
"Validation Carrier is not selected in party configuration."
)
api_instance = carrier.ups_api_instance(call='address_val')
if not self.country:
# XXX: Either this or assume it is the US of A
self.raise_user_error('Country is required to validate address.')
values = {
'CountryCode': self.country.code,
}
if self.subdivision:
# Fetch ups compatible subdivision
values['StateProvinceCode'] = self.subdivision.code.split('-')[-1]
if self.city:
values['City'] = self.city
if self.zip:
values['PostalCode'] = self.zip
address_request = api_instance.request_type(**values)
# Logging.
logger.debug(
'Making Address Validation Request to UPS for Address Id: {0}'
.format(self.id)
)
logger.debug(
'--------AV API REQUEST--------\n%s'
'\n--------END REQUEST--------'
% etree.tostring(address_request, pretty_print=True)
)
try:
address_response = api_instance.request(address_request)
# Logging.
logger.debug(
'--------AV API RESPONSE--------\n%s'
'\n--------END RESPONSE--------'
% etree.tostring(address_response, pretty_print=True)
)
except PyUPSException, exc:
self.raise_user_error(unicode(exc[0]))
if (len(address_response.AddressValidationResult) == 1) and \
address_response.AddressValidationResult.Quality.pyval == 1:
# This is a perfect match and there is no need to make
# suggestions.
return True
# The UPS response will include the following::
#
# * City
# * StateProvinceCode
# * PostalCodeLowEnd (Not very useful)
# * PostalCodeHighEnd (Not Very Useful)
#
# Example: https://gist.github.com/tarunbhardwaj/4df0673bdd1c7bc6ab89
#
# The approach here is to clear out the duplicates with just city
# and the state and only return combinations of address which
# differentiate based on city and state.
#
# (In most practical uses, it would just be the city that keeps
# changing).
unique_combinations = OrderedSet([
(node.Address.City.text, node.Address.StateProvinceCode.text)
for node in address_response.AddressValidationResult
])
# This part is sadly static... wish we could verify more than the
# state and city... like the street.
base_address = {
'name': self.name,
'street': self.street,
'streetbis': self.streetbis,
'country': self.country,
'zip': self.zip,
}
matches = []
for city, subdivision_code in unique_combinations:
try:
subdivision, = Subdivision.search([
('code', '=', '%s-%s' % (
self.country.code, subdivision_code
))
])
except ValueError:
# If a unique match cannot be found for the subdivision,
# we wont be able to save the address anyway.
continue
if (self.city.upper() == city.upper()) and \
(self.subdivision == subdivision):
# UPS does not know it, but this is a right address too
# because we are suggesting exactly what is already in the
# address.
return True
matches.append(
Address(city=city, subdivision=subdivision, **base_address)
)
return matches
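    # So _ups_address_validate() returns True when UPS confirms the address as
    # entered, otherwise a list of unsaved party.address suggestions that keep
    # the street/zip but swap in the city/state combinations UPS offered.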
def to_worldship_address(self):
"""
Return the dict for worldship address xml
"""
Company = Pool().get('company.company')
vals = {}
company_id = Transaction().context.get('company')
if not company_id:
self.raise_user_error(
"ups_field_missing",
error_args=('Company', 'context')
)
company_party = Company(company_id).party
if getattr(self, 'phone'):
phone = getattr(self, 'phone')
else:
phone = self.party.phone
vals = {
'CompanyOrName': company_party.name,
'Attention': self.name or self.party.name,
'Address1': self.street or '',
'Address2': self.streetbis or '',
'CountryTerritory': self.country and self.country.code,
'PostalCode': self.zip or '',
'CityOrTown': self.city or '',
'StateProvinceCounty':
self.subdivision and self.subdivision.code[3:],
'Telephone': digits_only_re.sub('', phone),
}
return vals
def to_worldship_to_address(self):
"""
Return xml object of to address
"""
values = self.to_worldship_address()
values['CompanyOrName'] = self.name or self.party.name
return WorldShip.ship_to_type(**values)
def to_worldship_from_address(self):
"""
Return xml object from address
"""
values = self.to_worldship_address()
return WorldShip.ship_from_type(**values)
| bsd-3-clause | -1,631,809,038,137,822,000 | 31.0575 | 80 | 0.548858 | false |
wsilva/humor-classifier-with-naive-bayes | python/humor-classifier.py | 1 | 9040 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, time, sys, itertools, math
import re
import random
import NaiveBayesWordClassifier
# holdout
def separateTrainAndTestGroupsUsingHoldout(csvfile='Sentiment Analysis Dataset.csv', testSetPercentSize=0.3):
# reading file
sourceFile = open(csvfile, 'r')
lines = sourceFile.readlines()
# removing first line
lines = lines[1:]
# sort randonly
random.shuffle(lines)
# calculating limit of lines in test file
testFileLimit = int(testSetPercentSize * len(lines))
testFile = open('testSetFile.csv', 'w')
trainFile = open('trainSetFile.csv', 'w')
# populating each file
cont = 0
for line in lines:
if cont<testFileLimit:
testFile.write(line)
else:
trainFile.write(line)
cont+=1
sourceFile.close()
testFile.close()
trainFile.close()
return True
# crossvalidation
def separateTrainAndTestGroupsUsingCrossvalidation(csvfile='Sentiment Analysis Dataset.csv', qtdeFolds=10):
# reading file
sourceFile = open(csvfile, 'r')
lines = sourceFile.readlines()
# removing first line
lines = lines[1:]
# sort randonly
random.shuffle(lines)
# calculating limit of lines in each file
foldLimit = int(len(lines)/qtdeFolds)
# opening fold files
foldDictionary = {}
for fold in range(1, 1+qtdeFolds):
filename = 'crossvalidation-{}.csv'.format(fold)
foldDictionary[fold] = open(filename, 'w')
# populating each fold file
cont = 0
fold = 1
for line in lines:
foldDictionary[fold].write(line)
if cont<foldLimit:
cont+=1
else:
fold+=1
cont=0
# closing destination files
for fold in range(1, 1+qtdeFolds):
foldDictionary[fold].close()
# closing source file
sourceFile.close()
return True
def readFileToLists(csvfile='trainSetFile.csv'):
arq = open(csvfile, 'r')
documents = []
# reading all lines
sourceFile = arq.readlines()
for line in sourceFile :
# get only 4 columns ignoring comas inside the tweet
columns = line.split(',', 3)
# get document number and tweet text in lower case
document = columns[0]
sentiment = columns[1]
text = columns[3].lower()
# removing 'trashy' characters with regex
text = re.sub(r'[^0-9a-z\ ]','', text)
documents.append([document, sentiment, text])
# closing file
arq.close()
return documents
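# Each row returned by readFileToLists() is [document_id, sentiment, text],
# e.g. (hypothetical row): ['1042', '1', 'loving the new phone'], where
# sentiment is '1' (positive) or '0' (negative) and text is lower-cased and
# stripped of non-alphanumeric characters.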
def holdoutFlow(removeStopWords=False):
    testSetPercent = 0.3
    print "Shuffling data and splitting into training (%d%% trainSetFile.csv) and test (%d%% testSetFile.csv) sets..." % ((1 - testSetPercent) * 100, 100 * testSetPercent)
    separateTrainAndTestGroupsUsingHoldout("../Sentiment Analysis Dataset.csv", testSetPercent)
print "Concluído"
print ""
print "Lendo o arquivo de treino (trainSetFile.csv) para memória, limpando e preparando..."
listOfDocuments = readFileToLists("trainSetFile.csv")
print "Concluído"
print ""
print "Criando e treinando o Naive Bayes com o arquivo de treinamento (testSetFile.csv)..."
nb = NaiveBayesWordClassifier.NaiveBayesWordClassifier()
nb.train(listOfDocuments, removeStopWords)
print "Concluído"
print ""
print "Resultados do treinamento:"
print "|V| = %s (vocabulário)" % (nb.vocabulary)
print "Prioris: P(pos) = %.5f" % (float(nb.qtdeDocument['1']) / float(nb.totalDocuments))
print " P(neg) = %.5f" % (float(nb.qtdeDocument['0']) / float(nb.totalDocuments))
print "ni (número total da frequência de palavras de cada classe):"
print " n[pos] = %s" % (nb.freq['1'])
print " n[neg] = %s" % (nb.freq['0'])
print "P(wi|ci) - Probabilidade condicional de cada palavra dada uma classe."
print "Limitando quantidade de exemplos em 2"
cont=0
for word, prob in nb.probability.iteritems():
cont+=1
if cont < 3:
print " P(%s|pos) = %.10f" % (word, prob['1'])
print " P(%s|neg) = %.10f" % (word, prob['0'])
print ""
print "Lendo o arquivo de teste (testSetFile.csv) para memória, limpando e preparando..."
listOfDocuments = readFileToLists("testSetFile.csv")
print "Concluído"
print ""
print "Testando cada documento do arquivo de teste e contabilizando acertos..."
hits = 0 #contador de acertos
miss = 0 #contador de erros
posPos=0
posNeg=0
negPos=0
negNeg=0
for doc in listOfDocuments:
# attributes from the list
document = doc[0]
sentiment = doc[1]
tweet = doc[2]
identifiedClass = nb.classificator(tweet, removeStopWords)
# counting the right hits
if sentiment==identifiedClass:
hits+=1
if sentiment=='1':
posPos+=1
else:
negNeg+=1
else:
miss+=1
if sentiment=='1':
posNeg+=1
else:
negPos+=1
print "Concluído"
print ""
print "Resultados do teste:"
print "%s acertos em %s documentos" % (hits, len(listOfDocuments))
print "%.2f %% de acertos" % (100 * float(hits)/float(len(listOfDocuments)))
print "%.2f %% de erros" % (100 * float(miss)/float(len(listOfDocuments)))
print "+-----+---------+---------+"
print "| | POS | NEG |"
print "+-----+---------+---------+"
print "| POS | %07d | %07d |" % (posPos, posNeg)
print "| NEG | %07d | %07d |" % (negPos, negNeg)
print "+-----+---------+---------+"
print ""
def crossvalidationFlow(removeStopWords=False):
qtdeFolds = 10
print "Embaralhando dados e separando em %s conjuntos (crossvalidation-{1..%s}.csv)" % (qtdeFolds, qtdeFolds)
separateTrainAndTestGroupsUsingCrossvalidation("../Sentiment Analysis Dataset.csv", qtdeFolds)
print "Concluído"
print ""
print "Preparando as rodadas de crossvalidation"
trainList={}
testList={}
for j in range(1, 1+qtdeFolds):
print " Rodada %s do crossvalidation" % j
for i in range(1, 1+qtdeFolds):
filename = 'crossvalidation-{}.csv'.format(i)
tmpList = readFileToLists(filename)
if i==j:
testList[j] = tmpList
print " Arquivo %s separado para teste" % filename
else:
if j in trainList:
trainList[j] = itertools.chain(trainList[j], tmpList)
else:
trainList[j] = tmpList
# print "Arquivo %s separado para treino" % filename
print "Concluído"
print ""
print "Usando o naive bayes nas rodadas do crossvalidation"
hitsList=[]
missList=[]
for i in range(1,1+qtdeFolds):
print " Treinamento número %s" % i
nb = NaiveBayesWordClassifier.NaiveBayesWordClassifier()
nb.train(trainList[i], removeStopWords)
        hits = 0  # hit counter
        miss = 0  # miss counter
        print "  Test round %s" % i
for doc in testList[i]:
# attributes from the list
document = doc[0]
sentiment = doc[1]
tweet = doc[2]
identifiedClass = nb.classificator(tweet, removeStopWords)
# counting the right hits
if sentiment==identifiedClass:
hits+=1
else:
miss+=1
hitsPercent = 100 * float(hits)/float(len(testList[i]))
missPercent = 100 * float(miss)/float(len(testList[i]))
print " Acuracia: %.2f %%" % hitsPercent
print " Erro: %.2f %%" % missPercent
hitsList.append(hitsPercent)
missList.append(missPercent)
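    # Aggregate the per-fold percentages below: mean accuracy/error and the
    # population standard deviation sigma = sqrt(sum((x_i - mean)^2) / n).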
avgHits = sum(hitsList) / float(len(hitsList))
avgMiss = sum(missList) / float(len(missList))
summary = 0
for i in hitsList:
summary = summary + ((i-avgHits)*(i-avgHits))
desvioPadraoHits = math.sqrt(summary / float(len(hitsList)))
summary = 0
for i in missList:
summary = summary + ((i-avgMiss)*(i-avgMiss))
desvioPadraoMiss = math.sqrt(summary / float(len(missList)))
print "Média de acertos: %.2f %% com desvio padrão de %.2f" % (avgHits, desvioPadraoHits)
print "Média de erros: %.2f %% com desvio padrão de %.2f" % (avgMiss, desvioPadraoMiss)
print ""
def main():
print "================= Holdout =================="
holdoutFlow()
print "======= Holdout removendo stop words ======="
holdoutFlow(True)
print "============= Cross Validation =============="
crossvalidationFlow()
print "=== Cross Validation removendo stop words ==="
crossvalidationFlow(True)
print "Concluído"
print ""
print "Fim"
print ""
if __name__ == '__main__':
main()
# | mit | 3,158,913,511,174,391,300 | 28.973422 | 181 | 0.587518 | false |
kivy-garden/garden.rotabox | examples/_rotabutton_example.py | 1 | 1061 | from kivy.base import runTouchApp
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.floatlayout import FloatLayout
from kivy.lang import Builder
import sys, os
sys.path.append(os.path.abspath(".."))
from rotabox import Rotabox
Builder.load_string('''
<Root>:
RotaButton:
size: 200, 50
center: 400, 300
img_source: 'atlas://data/images/defaulttheme/button'
on_press:
self.img_source = 'atlas://data/images/defaulttheme/button_pressed'
self.angle -= 5
if not self.angle: self.angle -= .0000001 # if angle is 0 canvas doesn't update ???
on_release:
self.img_source = 'atlas://data/images/defaulttheme/button'
canvas.before:
BorderImage:
source: self.img_source
pos: self.pos
size: self.size
Label:
size_hint: 1, 1
text: 'A Rotabox Button'
''')
class RotaButton(ButtonBehavior, Rotabox):
pass
class Root(FloatLayout):
pass
runTouchApp(Root())
| mit | -8,145,617,282,399,359,000 | 25.525 | 96 | 0.609802 | false |
ajkerr0/kappa | kappa/forcefield.py | 1 | 6117 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 21 13:16:37 2016
@author: Alex Kerr
Define general Forcefield class, and specific forcefields (AMBER, etc.) that inherit
the general one.
"""
import numpy as np
#forcefield class definitions
global_cutoff = 5.0 #angstroms
class Forcefield:
"""A classical forcefield that determines how atoms interact
Args:
name (str): Human readable string that identifies the forcefield.
eunits (float): The units of energy used in the ff, relative to kcal/mol.
lunits (float): The units of length used in the ff, relative to angstroms
lengths (bool): Boolean that determines if bond length interactions exist,
that is energy that is quadratic in the bond lengths.
angles (bool): Boolean that determines if bond angle interactions exist,
            energy that is quadratic in the bond angles.
dihs (bool): Determines dihedral angle interactions,
energy is an effective Fourier series of the angle(s).
lj (bool): Determines Lennard-Jones non-bonded interactions.
es (bool): Determines electrostatic point charge interactions.
tersoff (bool): Determines Tersoff-type interactions."""
def __init__(self, name, eunits, lunits,
lengths, angles, dihs, imptors, lj, es, tersoff):
self.name = name
self.eunits = eunits #relative to kcal/mol
self.lunits = lunits #relative to angstroms
##########
self.lengths = lengths #bond length interaction
self.angles = angles #bond angle interaction
self.dihs = dihs #dihedral angle interaction
self.imptors = imptors #improper dihedral angle interaction
self.lj = lj #lennard-jones, non-bonded interaction
self.es = es #electrostatic (point charge), non-bonded interaction
self.tersoff = tersoff #tersoff interaction
class Amber(Forcefield):
"""Amber forcefield inheriting from Forcefield,
as presented by Cornell et al. (1994)"""
def __init__(self, lengths=True, angles=False, dihs=False, imptors=False, lj=False):
super().__init__("amber", 1.0, 1.0,
lengths, angles, dihs, imptors, lj, False, False)
self.atomtype_file = "ATOMTYPE_AMBER_KERR.DEF"
self.param_dir = "amber99"
class AmberEdit(Forcefield):
"""Amber forcefield inheriting from Forcefield,
as presented by Cornell et al. (1994). Edited to include
Kerr's united atoms for sidechains."""
def __init__(self, lengths=True, angles=True, dihs=True, imptors=False, lj=True):
super().__init__("amberedit", 1.0, 1.0,
lengths, angles, dihs, imptors, lj, False, False)
self.atomtype_file = "ATOMTYPE_AMBEREDIT_KERR.DEF"
self.param_dir = "amber99edit"
class Gaff(Forcefield):
"""General Amber Forcefield"""
def __init__(self, lengths=True, angles=False, dihs=False, imptors=False, lj=False):
super().__init__("amber", 1.0, 1.0,
lengths, angles, dihs, imptors, lj, False, False)
self.atomtype_file = "ATOMTYPE_GFF_KERR.DEF"
self.param_dir = "gaff"
class Tersoff(Forcefield):
"""Under construction, a remnant of code past."""
def __init__(self, name="tersoff", energyUnits=0.043, lengthUnits=1.0):
super().__init__(self, "tersoff", 0.0433634, 1.0)
def configure_parameters(self, molecule):
pass
def define_energy_routine(self,molecule,grad=True):
atomList = molecule.atomList
#Tersoff potential terms
#define some constants, specific to carbon
A = 1.3936e3 #eV
B = 3.467e2 #eV
LAMBDA1 = 3.4879 #angstrom^-1
LAMBDA2 = 2.2119 #angstrom^-1
BETA = 1.5724e-7
N = 0.72751
C = 3.8049e4
D = 4.384
H = -5.7058e-1
#values for calculating g(theta) terms
c2 = C*C
d2 = D*D
g1 = 1 + (c2/d2)
#values for calculating bij terms
bijPower = -1/2/N
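        # The routine below evaluates (note: no cutoff function is applied)
        #   E = 1/2 * sum_i sum_{j != i} [ A*exp(-LAMBDA1*r_ij) - b_ij * B*exp(-LAMBDA2*r_ij) ]
        # with the bond-order term
        #   b_ij = (1 + (BETA*zeta_ij)**N) ** (-1/(2*N))
        # where zeta_ij sums g(theta) = 1 + c^2/d^2 - c^2/(d^2 + (H - cos(theta))^2)
        # over the other neighbours of atom i.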
def calculate_e(index):
e = 0.0
if index is None:
eList = range(len(atomList))
else:
eList = atomList[index].nList[:]
eList.append(index)
for i in eList:
atomi = atomList[i]
posi = atomi.pos
ineighbors = atomi.nList
for j in ineighbors:
atomj = atomList[j]
posj = atomj.pos
posij = posi - posj
rij = np.sqrt(posij.dot(posij))
#initialize zetaij
zetaij = 0.0
for j2 in [x for x in ineighbors if x != j]:
atomj2 = atomList[j2]
posj2 = atomj2.pos
posij2 = posi - posj2
rij2 = np.sqrt(posij2.dot(posij2))
cosTheta = posij.dot(posij2)/rij/rij2
gTerm = (H - cosTheta)*(H - cosTheta)
g = g1 - c2/(d2 + gTerm)
#increment zetaij by g
zetaij += g
fR = A*np.exp(-LAMBDA1*rij)
fA = B*np.exp(-LAMBDA2*rij)
#aij term
# aij = 1
#bond angle term
bijTerm = 1 + np.power(BETA*zetaij, N)
bij = np.power(bijTerm, bijPower)
e += fR - (bij*fA)
e *= 0.5
            return e
        return calculate_e
| mit | -195,136,923,481,602,200 | 36.759259 | 90 | 0.510381 | false |
spgill/python-spgill | spgill/printer/modules/formatting.py | 1 | 10499 | # stdlib imports
import contextlib
# vendor imports
# local imports
from spgill.printer import commands
class FormattingModule:
"""Mixin for text formatting functions."""
# Default flags
_flags = {
"encoding": None,
"inverted": False, # Flag for inverted text
"justification": 0, # Flag for text justification
}
# Binary flags for each of the format flags. These flag keys are
# subsequently injected into the normal tags dictionary.
_formatFlags = {
"small": 0x01,
"emphasize": 0x08,
"doubleHeight": 0x10,
"doubleWidth": 0x20,
"underline": 0x80,
}
# Insert format flags into the mixin flags
for _key in _formatFlags:
_flags[_key] = False
def text(self, t):
"""
Write text to the buffer, encoding the characters according to the
currently selected (or default) encoding.
"""
# Encode the text
self.write(
t.encode(
self.flags.encoding
or self.connection.props.printerEncoding.value,
"replace",
)
)
return self
def currentWidth(self):
"""Return the current line character width; as determined by flags."""
if self.flags.small:
return self.connection.props.printerLineWidthSmall.value
return self.connection.props.printerLineWidth.value
def format(self, **kwargs):
"""
Apply formatting to the text based on boolean keyword arguments.
*Not all format modes are supported by all printer models.*
Possible keywords are `small`, `emphasize`, `doubleHeight`,
`doubleWidth`, and `underline`. Truthy or falsy values are accepted.
Only passed keywords will affect text formatting. Passing no keywords
will result in the value of all keyword values being returned as a
dictionary.
Passing `reset=True` as a keyword argument will reset all formatting
keywords to `False`.
"""
# If no kwargs are passed, return the current formatting
if not kwargs:
return {key: getattr(self.flags, key) for key in self._formatFlags}
# Iterate through each formatting flag and construct the mode byte
mode = 0x00
for key, flag in self._formatFlags.items():
# If key is specified in the kwargs, update the flag
if key in kwargs:
setattr(self.flags, key, bool(kwargs[key]))
# Reset kwargs overrides any other kwarg
if "reset" in kwargs:
setattr(self.flags, key, False)
# Write the flag to the mode
if getattr(self.flags, key):
mode |= flag
# Write the command
self.write(commands.compose(commands.ESC, 0x21, mode))
return self
@contextlib.contextmanager
def formatBlock(self, **kwargs):
"""
Use `spgill.printer.Job.format` to format text contextually.
Function same keywords as `format` with the exception of `reset`,
which will always be ignored.
Example usage:
#!python
job.text('This is normal text \\n')
with formatBlock(small=True):
job.text('This is small text \\n')
job.text('The text is normal again \\n')
"""
# `reset` is not an allowed flag
kwargs.pop("reset", None)
# first, enact the format flags
self.format(**kwargs)
# yield and wait until end of the block
yield
# finally, reverse the tags
self.format(**{k: not v for k, v in kwargs.items()})
def br(self, n=1):
"""Write `n` number of newlines to the buffer."""
self.write(b"\n" * n)
return self
def split(self, half1, half2, sep=" ", width=None):
"""
Seperate two strings on a line by a character `sep`.
`half1` is the first string.
`half2` is the second string.
`sep` is the character used to separate them.
`width` is the character width of the line (if `None`, defaults to the
return value of `spgill.printer.Job.currentWidth`).
Example usage:
#!python
job.split('banana', 'orange', sep='+', width=18)
# would result in printing a string:
# 'banana++++++orange'
"""
width = width or self.currentWidth()
halves = len(half1) + len(half2)
if halves == width:
self.text(half1 + half2)
elif halves > width:
            diff = halves - width + 3  # trim half1 so the three separators still fit
self.text(half1[:-diff] + (sep * 3) + half2)
else:
diff = width - halves
self.text(half1 + (sep * diff) + half2)
return self
def around(self, pieces, sep=" ", width=None):
"""
Equally pad pieces of text with a character `sep`
`pieces` is a list of strings to separate.
`sep` is the character used to separate them.
`width` is the character width of the line (if `None`, defaults to the
return value of `spgill.printer.Job.currentWidth`).
Example usage:
#!python
job.around(['banana', 'orange'], sep='+', width=18)
# would result in printing a string:
# '++banana++orange++'
"""
width = width or self.currentWidth()
out = ""
padding = (width - sum([len(p) for p in pieces])) // (len(pieces) + 1)
out += padding * sep
for piece in pieces:
out += piece
out += padding * sep
out += (width - len(out)) * sep
self.text(out)
return self
def center(self, text, pad=" ", width=None):
"""
Center text on a line padded by character `pad`.
`text` is the text to be centered.
`pad` is the character used to pad the text.
`width` is the character width of the line (if `None`, defaults to the
return value of `spgill.printer.Job.currentWidth`).
Example usage:
#!python
job.center('banana', pad='+', width=18)
# would result in printing a string:
# '++++++banana++++++'
"""
width = width or self.currentWidth()
diff = width - len(text)
half = diff // 2
out = pad * half + text + pad * half
while len(out) < width:
out += pad
self.text(out)
return self
def invert(self, flag=None):
"""
Invert the foreground and background color.
black text on white background -> white text on black background
If `flag` is truthy, text is inverted. Otherwise, text is returned
to normal.
If `flag` is `None`, the current inversion status is returned.
"""
# If flag is not given, return current value
if flag is None:
return self.flags.inverted
# Else, write the flag
self.flags.inverted = flag
self.write(commands.compose(commands.GS, 0x42, bool(flag)))
return self
@contextlib.contextmanager
def invertBlock(self, flag=True):
"""
Use `spgill.printer.Job.invert` to invert text contextually.
`flag` functions the same as the original function, but defaults to
a value of `True`.
Example usage:
#!python
job.text('This is normal text \\n')
with invertBlock():
job.text('This text is inverted \\n')
job.text('The text is normal again \\n')
"""
self.invert(flag)
yield
self.invert(not flag)
_justifyMap = {"left": 0, "center": 1, "right": 2}
def justify(self, mode=None):
"""
Change the text justification using native ESC/POS.
`mode` is a string of `left`, `center`, or `right`. Case-insensitive.
If `mode` is `None`, returns the current justification (as an `int`).
"""
# If mode is not given, return the current one
if mode is None:
return self.flags.justification
# If mode is a string, resolve it to an int flag
if isinstance(mode, str):
mode = self._justifyMap[mode.lower()]
        # Record the new justification and write the flag!
        self.flags.justification = mode
        self.write(commands.compose(commands.ESC, 0x61, mode))
return self
@contextlib.contextmanager
def justifyBlock(self, mode):
"""
Use `spgill.printer.Job.justify` to justify text contextually.
`mode` functions the same as the original function.
Example usage:
#!python
job.text('This is normal text \\n')
with justifyBlock('right'):
job.text('This text is right-aligned \\n')
job.text('The text is normal again \\n')
"""
previous = self.flags.justification
self.justify(mode)
yield
self.justify(previous)
def spacing(self, n=None):
"""
Change the character line spacing.
`n` is number of dots between lines.
If `n` is None, line spacing is reset to default value.
"""
self.write(
commands.compose(commands.ESC, 0x32 if n is None else 0x33, n)
)
return self
# Text codec mapping
_encodings = {"ascii": 0, "cp850": 2, "cp858": 19, "iso8859_15": 40}
def encoding(self, name=None):
"""
Change the character encoding table.
*Not supported equally on every printer model.*
Valid (case-insensitive) codec names can be retrieved by the
        `spgill.printer.Job.encodingNames` function. If no name is given, the
        printer's default encoding is selected.
"""
        # If none is specified, reset to the printer's default encoding
        if name is None:
            name = self.connection.props.printerEncoding.value
        name = name.lower()
# Store it in the flag
self.flags.encoding = name
# Make sure it exists
if name not in self._encodings:
raise KeyError(f"`{name}` is not a supported text encoding!")
# Write it to the output
self.write(commands.compose(commands.ESC, 0x74, self._encodings[name]))
return self
def encodingNames(self):
"""Return valid text encoding names."""
return self._encodings.keys()
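    # Minimal usage sketch (hypothetical `job` object mixing in this module;
    # all the methods chain because they return self):
    #
    #   job.justify('center').format(emphasize=True)
    #   job.text('RECEIPT').br(2)
    #   job.format(reset=True).justify('left')
    #   job.split('Total', '$4.20')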
| mit | -5,918,906,474,870,654,000 | 27.843407 | 79 | 0.569197 | false |
vgamula/asd | asd/projects/migrations/0001_initial.py | 1 | 4789 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Employee',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('last_name', models.CharField(max_length=255, verbose_name=b'Last name')),
('first_name', models.CharField(max_length=255, verbose_name=b'First name')),
('middle_name', models.CharField(max_length=255, verbose_name=b'Middle name')),
('birthday', models.DateField(verbose_name=b'Birthday')),
],
options={
'verbose_name_plural': 'Employees',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Position',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(unique=True, max_length=255, verbose_name=b'Title')),
('rate', models.DecimalField(verbose_name=b'Rate', max_digits=3, decimal_places=3)),
],
options={
'verbose_name_plural': 'Positions',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Procurance',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('work_amount', models.IntegerField(verbose_name=b'Work amount')),
('start_date', models.DateField(verbose_name=b'Start date')),
('planned_end_date', models.DateField(verbose_name=b'Planned end date')),
('real_end_date', models.DateField(null=True, verbose_name=b'Real end date')),
('employee', models.ForeignKey(verbose_name=b'Employee', to='projects.Employee')),
],
options={
'verbose_name_plural': 'Procurances',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(unique=True, max_length=255, verbose_name=b'Title')),
('deadline', models.DateField(verbose_name=b'Deadline')),
('price', models.DecimalField(verbose_name=b'Price', max_digits=3, decimal_places=3)),
('details', models.TextField(verbose_name=b'Details')),
('manager', models.ForeignKey(verbose_name=b'Manager', to='projects.Employee')),
],
options={
'verbose_name_plural': 'Projects',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ProjectType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255, verbose_name=b'Title')),
],
options={
'verbose_name_plural': 'Project types',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Work',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255, verbose_name=b'Title')),
],
options={
'verbose_name_plural': 'Works',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='project',
name='type',
field=models.ForeignKey(verbose_name=b'Project type', to='projects.ProjectType'),
preserve_default=True,
),
migrations.AddField(
model_name='procurance',
name='project',
field=models.ForeignKey(verbose_name=b'Project', to='projects.Project'),
preserve_default=True,
),
migrations.AddField(
model_name='procurance',
name='work',
field=models.ForeignKey(verbose_name=b'Work', to='projects.Work'),
preserve_default=True,
),
migrations.AddField(
model_name='employee',
name='position',
field=models.ForeignKey(verbose_name=b'Position', to='projects.Position'),
preserve_default=True,
),
]
| mit | 5,522,352,361,105,949,000 | 40.643478 | 114 | 0.533514 | false |
vulgur/WeeklyFoodPlan | WeeklyFoodPlan/meal_array_generator.py | 1 | 3000 | # coding=utf-8
import re
original_names_string = '蒸羊羔、蒸熊掌、蒸鹿尾儿、烧花鸭、烧雏鸡儿、烧子鹅、卤煮咸鸭、酱鸡、腊肉、松花、小肚儿、晾肉、香肠、什锦苏盘、熏鸡、白肚儿、清蒸八宝猪、江米酿鸭子、罐儿野鸡、罐儿鹌鹑、卤什锦、卤子鹅、卤虾、烩虾、炝虾仁儿、山鸡、兔脯、菜蟒、银鱼、清蒸哈什蚂、烩鸭腰儿、烩鸭条儿、清拌鸭丝儿、黄心管儿、焖白鳝、焖黄鳝、豆鼓鲇鱼、锅烧鲇鱼、烀皮甲鱼、锅烧鲤鱼、抓炒鲤鱼、软炸里脊、软炸鸡、什锦套肠、麻酥油卷儿、熘鲜蘑、熘鱼脯儿、熘鱼片儿、熘鱼肚儿、醋熘肉片儿、熘白蘑、烩三鲜、炒银鱼、烩鳗鱼、清蒸火腿、炒白虾、炝青蛤、炒面鱼、炝芦笋、芙蓉燕菜、炒肝尖儿、南炒肝关儿、油爆肚仁儿、汤爆肚领儿、炒金丝、烩银丝、糖熘饹炸儿、糖熘荸荠、蜜丝山药、拔丝鲜桃、熘南贝、炒南贝、烩鸭丝、烩散丹、清蒸鸡、黄焖鸡、大炒鸡、熘碎鸡、香酥鸡,炒鸡丁儿、熘鸡块儿、三鲜丁儿、八宝丁儿、清蒸玉兰片、炒虾仁儿、炒腰花儿、炒蹄筋儿、锅烧海参、锅烧白菜、炸海耳、浇田鸡、桂花翅子、清蒸翅子、炸飞禽、炸葱、炸排骨、烩鸡肠肚儿、烩南荠、盐水肘花儿,拌瓤子、燉吊子、锅烧猪蹄儿、烧鸳鸯、烧百合、烧苹果、酿果藕、酿江米、炒螃蟹。汆大甲、什锦葛仙米、石鱼、带鱼、黄花鱼、油泼肉、酱泼肉、红肉锅子,白肉锅子、菊花锅子。野鸡锅子、元宵锅子、杂面锅子、荸荠一品锅子、软炸飞禽、龙虎鸡蛋、猩唇、驼峰、鹿茸、熊掌、奶猪、奶鸭子、杠猪、挂炉羊、清蒸江瑶柱、糖熘鸡头米、拌鸡丝儿、拌肚丝儿、什锦豆腐、什锦丁儿、精虾、精蟹、精鱼、精熘鱼片儿、熘蟹肉、炒蟹肉、清拌蟹肉、蒸南瓜、酿倭瓜、炒丝瓜、焖冬瓜、焖鸡掌、焖鸭掌、焖笋、熘茭白、茄干儿晒卤肉、鸭羹、蟹肉羹、三鲜木樨汤、红丸子、白丸子、熘丸子、炸丸子、三鲜丸子、四喜丸子、汆丸子、葵花丸子、饹炸丸子、豆腐丸子、红炖肉、白炖肉、松肉、扣肉、烤肉、酱肉、荷叶卤、一品肉、樱桃肉、马牙肉、酱豆腐肉、坛子肉、罐儿肉、元宝肉、福禄肉、红肘子、白肘子、水晶肘子、蜜蜡肘子、烧烀肘子、扒肘条儿、蒸羊肉、烧羊肉、五香羊肉、酱羊肉.汆三样儿、爆三样儿、烧紫盖儿、炖鸭杂儿、熘白杂碎、三鲜鱼翅、栗子鸡、尖汆活鲤鱼、板鸭、筒子鸡'
if __name__ == '__main__':
split_words = re.split("、|,|。", original_names_string)
array_str = ''
for word in split_words:
array_str += '"{0}",'.format(word)
array_str = array_str[:-1]
final_str = '[{0}]'.format(array_str)
print(final_str)
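    # The printed result is a JSON-style array literal of the dish names,
    # e.g. ["蒸羊羔","蒸熊掌", ...], presumably for pasting into the app's source.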
| mit | -890,768,529,198,658,600 | 80.066667 | 915 | 0.715461 | false |
xavfernandez/pip | tests/unit/test_wheel.py | 1 | 16033 | """Tests for wheel binary packages and .dist-info."""
import csv
import logging
import os
import textwrap
from email import message_from_string
import pytest
from mock import patch
from pip._vendor.packaging.requirements import Requirement
from pip._internal.locations import get_scheme
from pip._internal.models.scheme import Scheme
from pip._internal.operations.build.wheel_legacy import (
get_legacy_build_wheel_path,
)
from pip._internal.operations.install import wheel
from pip._internal.operations.install.wheel import (
MissingCallableSuffix,
_raise_for_invalid_entrypoint,
)
from pip._internal.utils.compat import WINDOWS
from pip._internal.utils.misc import hash_file
from pip._internal.utils.unpacking import unpack_file
from tests.lib import DATA_DIR, assert_paths_equal
def call_get_legacy_build_wheel_path(caplog, names):
wheel_path = get_legacy_build_wheel_path(
names=names,
temp_dir='/tmp/abcd',
name='pendulum',
command_args=['arg1', 'arg2'],
command_output='output line 1\noutput line 2\n',
)
return wheel_path
def test_get_legacy_build_wheel_path(caplog):
actual = call_get_legacy_build_wheel_path(caplog, names=['name'])
assert_paths_equal(actual, '/tmp/abcd/name')
assert not caplog.records
def test_get_legacy_build_wheel_path__no_names(caplog):
caplog.set_level(logging.INFO)
actual = call_get_legacy_build_wheel_path(caplog, names=[])
assert actual is None
assert len(caplog.records) == 1
record = caplog.records[0]
assert record.levelname == 'WARNING'
assert record.message.splitlines() == [
"Legacy build of wheel for 'pendulum' created no files.",
"Command arguments: arg1 arg2",
'Command output: [use --verbose to show]',
]
def test_get_legacy_build_wheel_path__multiple_names(caplog):
caplog.set_level(logging.INFO)
# Deliberately pass the names in non-sorted order.
actual = call_get_legacy_build_wheel_path(
caplog, names=['name2', 'name1'],
)
assert_paths_equal(actual, '/tmp/abcd/name1')
assert len(caplog.records) == 1
record = caplog.records[0]
assert record.levelname == 'WARNING'
assert record.message.splitlines() == [
"Legacy build of wheel for 'pendulum' created more than one file.",
"Filenames (choosing first): ['name1', 'name2']",
"Command arguments: arg1 arg2",
'Command output: [use --verbose to show]',
]
@pytest.mark.parametrize("console_scripts",
["pip = pip._internal.main:pip",
"pip:pip = pip._internal.main:pip"])
def test_get_entrypoints(tmpdir, console_scripts):
entry_points = tmpdir.joinpath("entry_points.txt")
with open(str(entry_points), "w") as fp:
fp.write("""
[console_scripts]
{}
[section]
common:one = module:func
common:two = module:other_func
""".format(console_scripts))
assert wheel.get_entrypoints(str(entry_points)) == (
dict([console_scripts.split(' = ')]),
{},
)
def test_raise_for_invalid_entrypoint_ok():
_raise_for_invalid_entrypoint("hello = hello:main")
@pytest.mark.parametrize("entrypoint", [
"hello = hello",
"hello = hello:",
])
def test_raise_for_invalid_entrypoint_fail(entrypoint):
with pytest.raises(MissingCallableSuffix):
_raise_for_invalid_entrypoint(entrypoint)
@pytest.mark.parametrize("outrows, expected", [
([
('', '', 'a'),
('', '', ''),
], [
('', '', ''),
('', '', 'a'),
]),
([
# Include an int to check avoiding the following error:
# > TypeError: '<' not supported between instances of 'str' and 'int'
('', '', 1),
('', '', ''),
], [
('', '', ''),
('', '', 1),
]),
])
def test_sorted_outrows(outrows, expected):
actual = wheel.sorted_outrows(outrows)
assert actual == expected
def call_get_csv_rows_for_installed(tmpdir, text):
path = tmpdir.joinpath('temp.txt')
path.write_text(text)
# Test that an installed file appearing in RECORD has its filename
# updated in the new RECORD file.
installed = {'a': 'z'}
changed = set()
generated = []
lib_dir = '/lib/dir'
with wheel.open_for_csv(path, 'r') as f:
reader = csv.reader(f)
outrows = wheel.get_csv_rows_for_installed(
reader, installed=installed, changed=changed,
generated=generated, lib_dir=lib_dir,
)
return outrows
def test_get_csv_rows_for_installed(tmpdir, caplog):
text = textwrap.dedent("""\
a,b,c
d,e,f
""")
outrows = call_get_csv_rows_for_installed(tmpdir, text)
expected = [
('z', 'b', 'c'),
('d', 'e', 'f'),
]
assert outrows == expected
# Check there were no warnings.
assert len(caplog.records) == 0
def test_get_csv_rows_for_installed__long_lines(tmpdir, caplog):
text = textwrap.dedent("""\
a,b,c,d
e,f,g
h,i,j,k
""")
outrows = call_get_csv_rows_for_installed(tmpdir, text)
expected = [
('z', 'b', 'c', 'd'),
('e', 'f', 'g'),
('h', 'i', 'j', 'k'),
]
assert outrows == expected
messages = [rec.message for rec in caplog.records]
expected = [
"RECORD line has more than three elements: ['a', 'b', 'c', 'd']",
"RECORD line has more than three elements: ['h', 'i', 'j', 'k']"
]
assert messages == expected
@pytest.mark.parametrize("text,expected", [
("Root-Is-Purelib: true", True),
("Root-Is-Purelib: false", False),
("Root-Is-Purelib: hello", False),
("", False),
("root-is-purelib: true", True),
("root-is-purelib: True", True),
])
def test_wheel_root_is_purelib(text, expected):
assert wheel.wheel_root_is_purelib(message_from_string(text)) == expected
class TestWheelFile(object):
def test_unpack_wheel_no_flatten(self, tmpdir):
filepath = os.path.join(DATA_DIR, 'packages',
'meta-1.0-py2.py3-none-any.whl')
unpack_file(filepath, tmpdir)
assert os.path.isdir(os.path.join(tmpdir, 'meta-1.0.dist-info'))
class TestInstallUnpackedWheel(object):
"""
Tests for moving files from wheel src to scheme paths
"""
def prep(self, data, tmpdir):
self.name = 'sample'
self.wheelpath = data.packages.joinpath(
'sample-1.2.0-py2.py3-none-any.whl')
self.req = Requirement('sample')
self.src = os.path.join(tmpdir, 'src')
self.dest = os.path.join(tmpdir, 'dest')
self.scheme = Scheme(
purelib=os.path.join(self.dest, 'lib'),
platlib=os.path.join(self.dest, 'lib'),
headers=os.path.join(self.dest, 'headers'),
scripts=os.path.join(self.dest, 'bin'),
data=os.path.join(self.dest, 'data'),
)
self.src_dist_info = os.path.join(
self.src, 'sample-1.2.0.dist-info')
self.dest_dist_info = os.path.join(
self.scheme.purelib, 'sample-1.2.0.dist-info')
def assert_installed(self):
# lib
assert os.path.isdir(
os.path.join(self.scheme.purelib, 'sample'))
# dist-info
metadata = os.path.join(self.dest_dist_info, 'METADATA')
assert os.path.isfile(metadata)
# data files
data_file = os.path.join(self.scheme.data, 'my_data', 'data_file')
assert os.path.isfile(data_file)
# package data
pkg_data = os.path.join(
self.scheme.purelib, 'sample', 'package_data.dat')
assert os.path.isfile(pkg_data)
def test_std_install(self, data, tmpdir):
self.prep(data, tmpdir)
wheel.install_wheel(
self.name,
self.wheelpath,
scheme=self.scheme,
req_description=str(self.req),
)
self.assert_installed()
def test_install_prefix(self, data, tmpdir):
prefix = os.path.join(os.path.sep, 'some', 'path')
self.prep(data, tmpdir)
scheme = get_scheme(
self.name,
user=False,
home=None,
root=tmpdir,
isolated=False,
prefix=prefix,
)
wheel.install_wheel(
self.name,
self.wheelpath,
scheme=scheme,
req_description=str(self.req),
)
bin_dir = 'Scripts' if WINDOWS else 'bin'
assert os.path.exists(os.path.join(tmpdir, 'some', 'path', bin_dir))
assert os.path.exists(os.path.join(tmpdir, 'some', 'path', 'my_data'))
def test_dist_info_contains_empty_dir(self, data, tmpdir):
"""
Test that empty dirs are not installed
"""
# e.g. https://github.com/pypa/pip/issues/1632#issuecomment-38027275
self.prep(data, tmpdir)
src_empty_dir = os.path.join(
self.src_dist_info, 'empty_dir', 'empty_dir')
os.makedirs(src_empty_dir)
assert os.path.isdir(src_empty_dir)
wheel.install_wheel(
self.name,
self.wheelpath,
scheme=self.scheme,
req_description=str(self.req),
_temp_dir_for_testing=self.src,
)
self.assert_installed()
assert not os.path.isdir(
os.path.join(self.dest_dist_info, 'empty_dir'))
class TestMessageAboutScriptsNotOnPATH(object):
tilde_warning_msg = (
"NOTE: The current PATH contains path(s) starting with `~`, "
"which may not be expanded by all applications."
)
def _template(self, paths, scripts):
with patch.dict('os.environ', {'PATH': os.pathsep.join(paths)}):
return wheel.message_about_scripts_not_on_PATH(scripts)
def test_no_script(self):
retval = self._template(
paths=['/a/b', '/c/d/bin'],
scripts=[]
)
assert retval is None
def test_single_script__single_dir_not_on_PATH(self):
retval = self._template(
paths=['/a/b', '/c/d/bin'],
scripts=['/c/d/foo']
)
assert retval is not None
assert "--no-warn-script-location" in retval
assert "foo is installed in '/c/d'" in retval
assert self.tilde_warning_msg not in retval
def test_two_script__single_dir_not_on_PATH(self):
retval = self._template(
paths=['/a/b', '/c/d/bin'],
scripts=['/c/d/foo', '/c/d/baz']
)
assert retval is not None
assert "--no-warn-script-location" in retval
assert "baz and foo are installed in '/c/d'" in retval
assert self.tilde_warning_msg not in retval
def test_multi_script__multi_dir_not_on_PATH(self):
retval = self._template(
paths=['/a/b', '/c/d/bin'],
scripts=['/c/d/foo', '/c/d/bar', '/c/d/baz', '/a/b/c/spam']
)
assert retval is not None
assert "--no-warn-script-location" in retval
assert "bar, baz and foo are installed in '/c/d'" in retval
assert "spam is installed in '/a/b/c'" in retval
assert self.tilde_warning_msg not in retval
def test_multi_script_all__multi_dir_not_on_PATH(self):
retval = self._template(
paths=['/a/b', '/c/d/bin'],
scripts=[
'/c/d/foo', '/c/d/bar', '/c/d/baz',
'/a/b/c/spam', '/a/b/c/eggs'
]
)
assert retval is not None
assert "--no-warn-script-location" in retval
assert "bar, baz and foo are installed in '/c/d'" in retval
assert "eggs and spam are installed in '/a/b/c'" in retval
assert self.tilde_warning_msg not in retval
def test_two_script__single_dir_on_PATH(self):
retval = self._template(
paths=['/a/b', '/c/d/bin'],
scripts=['/a/b/foo', '/a/b/baz']
)
assert retval is None
def test_multi_script__multi_dir_on_PATH(self):
retval = self._template(
paths=['/a/b', '/c/d/bin'],
scripts=['/a/b/foo', '/a/b/bar', '/a/b/baz', '/c/d/bin/spam']
)
assert retval is None
def test_multi_script__single_dir_on_PATH(self):
retval = self._template(
paths=['/a/b', '/c/d/bin'],
scripts=['/a/b/foo', '/a/b/bar', '/a/b/baz']
)
assert retval is None
def test_single_script__single_dir_on_PATH(self):
retval = self._template(
paths=['/a/b', '/c/d/bin'],
scripts=['/a/b/foo']
)
assert retval is None
def test_PATH_check_case_insensitive_on_windows(self):
retval = self._template(
paths=['C:\\A\\b'],
scripts=['c:\\a\\b\\c', 'C:/A/b/d']
)
if WINDOWS:
assert retval is None
else:
assert retval is not None
assert self.tilde_warning_msg not in retval
def test_trailing_ossep_removal(self):
retval = self._template(
paths=[os.path.join('a', 'b', '')],
scripts=[os.path.join('a', 'b', 'c')]
)
assert retval is None
def test_missing_PATH_env_treated_as_empty_PATH_env(self):
scripts = ['a/b/foo']
env = os.environ.copy()
del env['PATH']
with patch.dict('os.environ', env, clear=True):
retval_missing = wheel.message_about_scripts_not_on_PATH(scripts)
with patch.dict('os.environ', {'PATH': ''}):
retval_empty = wheel.message_about_scripts_not_on_PATH(scripts)
assert retval_missing == retval_empty
def test_no_script_tilde_in_path(self):
retval = self._template(
paths=['/a/b', '/c/d/bin', '~/e', '/f/g~g'],
scripts=[]
)
assert retval is None
def test_multi_script_all_tilde__multi_dir_not_on_PATH(self):
retval = self._template(
paths=['/a/b', '/c/d/bin', '~e/f'],
scripts=[
'/c/d/foo', '/c/d/bar', '/c/d/baz',
'/a/b/c/spam', '/a/b/c/eggs', '/e/f/tilde'
]
)
assert retval is not None
assert "--no-warn-script-location" in retval
assert "bar, baz and foo are installed in '/c/d'" in retval
assert "eggs and spam are installed in '/a/b/c'" in retval
assert "tilde is installed in '/e/f'" in retval
assert self.tilde_warning_msg in retval
def test_multi_script_all_tilde_not_at_start__multi_dir_not_on_PATH(self):
retval = self._template(
paths=['/e/f~f', '/c/d/bin'],
scripts=[
'/c/d/foo', '/c/d/bar', '/c/d/baz',
'/e/f~f/c/spam', '/e/f~f/c/eggs'
]
)
assert retval is not None
assert "--no-warn-script-location" in retval
assert "bar, baz and foo are installed in '/c/d'" in retval
assert "eggs and spam are installed in '/e/f~f/c'" in retval
assert self.tilde_warning_msg not in retval
class TestWheelHashCalculators(object):
def prep(self, tmpdir):
self.test_file = tmpdir.joinpath("hash.file")
# Want this big enough to trigger the internal read loops.
self.test_file_len = 2 * 1024 * 1024
with open(str(self.test_file), "w") as fp:
fp.truncate(self.test_file_len)
self.test_file_hash = \
'5647f05ec18958947d32874eeb788fa396a05d0bab7c1b71f112ceb7e9b31eee'
self.test_file_hash_encoded = \
'sha256=VkfwXsGJWJR9ModO63iPo5agXQurfBtx8RLOt-mzHu4'
def test_hash_file(self, tmpdir):
self.prep(tmpdir)
h, length = hash_file(self.test_file)
assert length == self.test_file_len
assert h.hexdigest() == self.test_file_hash
def test_rehash(self, tmpdir):
self.prep(tmpdir)
h, length = wheel.rehash(self.test_file)
assert length == str(self.test_file_len)
assert h == self.test_file_hash_encoded
| mit | -7,496,408,292,240,154,000 | 31.854508 | 78 | 0.572631 | false |
NicovincX2/Python-3.5 | Algorithmique/Mathématiques discrètes/Théorie de l'information/Théorie des codes/Distance de Levenshtein/levenshtein.py | 1 | 2428 | # -*- coding: utf-8 -*-
import os
"""
Each element returned by levenshtein_sequence is a tuple made of:
the operation ( =, -, +, * for respectively keep, delete, insert, substitute),
the coordinate in the first string
and the coordinate in the second string.
"""
def square_list(a, b, value=0):
return [[value, ] * b for j in range(a)]
def arg_min(* arg_list):
arg_s = None
for i, arg in enumerate(arg_list):
if i == 0 or arg < arg_s:
arg_s = arg
i_s = i
return i_s, arg_s
MODIFIED = 0
DELETED = 1
CREATED = 2
def levenshtein_distance(a, b):
""" return the levenshtein distance between two strings of list of """
len_a = len(a)
len_b = len(b)
d = square_list(len_a + 1, len_b + 1)
for i in range(1, len_a + 1):
d[i][0] = i
for j in range(1, len_b + 1):
d[0][j] = j
for j in range(1, len_b + 1):
for i in range(1, len_a + 1):
if a[i - 1] == b[j - 1]:
d[i][j] = d[i - 1][j - 1]
else:
d[i][j] = min(d[i - 1][j], d[i][j - 1], d[i - 1][j - 1]) + 1
return d[-1][-1]
def levenshtein_sequence(a, b):
""" return an explicit list of difference between a and b """
len_a = len(a)
len_b = len(b)
s = list()
d = square_list(len_a + 1, len_b + 1)
for i in range(1, len_a + 1):
d[i][0] = i
for j in range(1, len_b + 1):
d[0][j] = j
for j in range(1, len_b + 1):
for i in range(1, len_a + 1):
if a[i - 1] == b[j - 1]:
d[i][j] = d[i - 1][j - 1]
else:
d[i][j] = min(d[i - 1][j], d[i][j - 1], d[i - 1][j - 1]) + 1
prev_i, prev_j = i, j
while i > 0 and j > 0:
if i == 1 and j == 1:
if prev_i != i and prev_j != j:
u = MODIFIED
elif prev_i == i:
u = CREATED
elif prev_j == j:
u = DELETED
new_i, new_j = i - 1, j - 1
elif i == 1:
new_i, new_j = i, j - 1
u = CREATED
elif j == 1:
u = DELETED
new_i, new_j = i - 1, j
else:
u, null = arg_min(d[i - 1][j - 1], d[i - 1][j], d[i][j - 1])
new_i, new_j = i - (1, 1, 0)[u], j - (1, 0, 1)[u]
op = '*-+'[u] if d[i][j] != d[new_i][new_j] else '='
s.append((op, i - 1, j - 1))
prev_i, prev_j = i, j
i, j = new_i, new_j
return list(reversed(s))
os.system("pause")
| gpl-3.0 | -1,356,546,386,466,508,500 | 23.525253 | 77 | 0.421746 | false |
kdechant/eamon | player/migrations/0003_rating.py | 1 | 1049 | # Generated by Django 2.2.1 on 2019-07-04 04:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('adventure', '0058_auto_20190703_2149'),
('player', '0002_savedgame_created'),
]
operations = [
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.CharField(max_length=255, null=True)),
('overall', models.IntegerField(blank=True, null=True)),
('combat', models.IntegerField(blank=True, null=True)),
('puzzle', models.IntegerField(blank=True, null=True)),
('created', models.DateTimeField(auto_now_add=True, null=True)),
('adventure', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ratings', to='adventure.Adventure')),
],
),
]
| mit | 740,330,355,191,959,000 | 37.851852 | 144 | 0.597712 | false |
irmen/Pyro5 | tests/test_threadpool.py | 1 | 4298 | """
Tests for the thread pool.
Pyro - Python Remote Objects. Copyright by Irmen de Jong ([email protected]).
"""
import time
import random
import pytest
from Pyro5 import socketutil, server
from Pyro5.svr_threads import Pool, PoolError, NoFreeWorkersError, SocketServer_Threadpool
from Pyro5 import config
JOB_TIME = 0.2
class Job(object):
def __init__(self, name="unnamed"):
self.name = name
def __call__(self):
time.sleep(JOB_TIME - random.random() / 10.0)
class SlowJob(object):
def __init__(self, name="unnamed"):
self.name = name
def __call__(self):
time.sleep(5*JOB_TIME - random.random() / 10.0)
class TestThreadPool:
def setup_method(self):
config.THREADPOOL_SIZE_MIN = 2
config.THREADPOOL_SIZE = 4
def teardown_method(self):
config.reset()
def testCreate(self):
with Pool() as jq:
_ = repr(jq)
assert jq.closed
def testSingle(self):
with Pool() as p:
job = Job()
p.process(job)
time.sleep(0.02) # let it pick up the job
assert len(p.busy) == 1
def testAllBusy(self):
try:
config.COMMTIMEOUT = 0.2
with Pool() as p:
for i in range(config.THREADPOOL_SIZE):
p.process(SlowJob(str(i+1)))
# putting one more than the number of workers should raise an error:
with pytest.raises(NoFreeWorkersError):
p.process(SlowJob("toomuch"))
finally:
config.COMMTIMEOUT = 0.0
def testClose(self):
with Pool() as p:
for i in range(config.THREADPOOL_SIZE):
p.process(Job(str(i + 1)))
with pytest.raises(PoolError):
p.process(Job("1")) # must not allow new jobs after closing
assert len(p.busy) == 0
assert len(p.idle) == 0
def testScaling(self):
with Pool() as p:
for i in range(config.THREADPOOL_SIZE_MIN-1):
p.process(Job("x"))
assert len(p.idle) == 1
assert len(p.busy) == config.THREADPOOL_SIZE_MIN-1
p.process(Job("x"))
assert len(p.idle) == 0
assert len(p.busy) == config.THREADPOOL_SIZE_MIN
# grow until no more free workers
while True:
try:
p.process(Job("x"))
except NoFreeWorkersError:
break
assert len(p.idle) == 0
assert len(p.busy) == config.THREADPOOL_SIZE
# wait till jobs are done and check ending situation
time.sleep(JOB_TIME*1.5)
assert len(p.busy) == 0
assert len(p.idle) == config.THREADPOOL_SIZE_MIN
class ServerCallback(server.Daemon):
def __init__(self):
super().__init__()
self.received_denied_reasons = []
def _handshake(self, connection, denied_reason=None):
self.received_denied_reasons.append(denied_reason) # store the denied reason
return True
def handleRequest(self, connection):
time.sleep(0.05)
def _housekeeping(self):
pass
class TestThreadPoolServer:
def setup_method(self):
config.THREADPOOL_SIZE_MIN = 1
config.THREADPOOL_SIZE = 1
config.POLLTIMEOUT = 0.5
config.COMMTIMEOUT = 0.5
def teardown_method(self):
config.reset()
def testServerPoolFull(self):
port = socketutil.find_probably_unused_port()
serv = SocketServer_Threadpool()
daemon = ServerCallback()
serv.init(daemon, "localhost", port)
serversock = serv.sock.getsockname()
csock1 = socketutil.create_socket(connect=serversock)
csock2 = socketutil.create_socket(connect=serversock)
try:
serv.events([serv.sock])
time.sleep(0.2)
assert daemon.received_denied_reasons == [None]
serv.events([serv.sock])
time.sleep(0.2)
assert len(daemon.received_denied_reasons) == 2
assert "no free workers, increase server threadpool size" in daemon.received_denied_reasons
finally:
csock1.close()
csock2.close()
serv.shutdown()
| mit | -3,677,530,667,182,278,000 | 28.847222 | 103 | 0.571196 | false |
joaander/hoomd-blue | hoomd/filter/__init__.py | 1 | 1556 | """Particle filters.
Particle filters describe criteria to select subsets of the particles in the
system for use by various operations throughout HOOMD. To maintain high
performance, filters are **not** re-evaluated on every use. Instead, each unique
particular filter (defined by the class name and hash) is mapped to a **group**,
an internally maintained list of the selected particles. Subsequent uses of the
same particle filter specification (in the same `Simulation`) will resolve to
the same group *and the originally selected particles*, **even if the state of
the system has changed.**
Groups are not completely static. HOOMD-blue re-evaluates the filter
specifications and updates the group membership whenever the number of particles
in the simulation changes. A future release will include an operation that you
can schedule to periodically update groups on demand.
For molecular dynamics simulations, each group maintains a count of the number
of degrees of freedom given to the group by integration methods. This count is
used by `hoomd.md.compute.ThermodynamicQuantities` and the integration methods
themselves to compute the kinetic temperature. See
`hoomd.State.update_group_dof` for details on when HOOMD-blue updates this
count.
"""
from hoomd.filter.filter_ import ParticleFilter # noqa
from hoomd.filter.all_ import All # noqa
from hoomd.filter.null import Null # noqa
from hoomd.filter.set_ import Intersection, SetDifference, Union # noqa
from hoomd.filter.tags import Tags # noqa
from hoomd.filter.type_ import Type # noqa
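# Example usage (an illustrative sketch; the constructor arguments shown are
# assumptions based on the filter classes imported above):
#
#   import hoomd
#   type_a = hoomd.filter.Type(['A'])        # particles whose type is 'A'
#   tagged = hoomd.filter.Tags([0, 1, 2])    # particles with tags 0, 1 and 2
#   either = hoomd.filter.Union(type_a, tagged)
#
# Passing `either` to an operation resolves it to a cached group the first time it
# is used; reusing the same filter later maps back to that same group, as described above.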
| bsd-3-clause | -1,608,828,876,454,665,700 | 50.866667 | 80 | 0.800771 | false |
lchsk/django-mongonaut | mongonaut/views.py | 1 | 12698 | # -*- coding: utf-8 -*-
"""
TODO move permission checks to the dispatch view thingee
"""
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.forms import Form
from django.http import HttpResponseForbidden
from django.http import Http404
from django.views.generic.edit import DeletionMixin
from django.views.generic import ListView
from django.views.generic import TemplateView
from django.views.generic.edit import FormView
from mongoengine.fields import EmbeddedDocumentField, ListField
from mongonaut.forms import MongoModelForm
from mongonaut.mixins import MongonautFormViewMixin
from mongonaut.mixins import MongonautViewMixin
from mongonaut.utils import is_valid_object_id
class IndexView(MongonautViewMixin, ListView):
template_name = "mongonaut/index.html"
queryset = []
permission = 'has_view_permission'
def get_queryset(self):
return self.get_mongoadmins()
class AppListView(MongonautViewMixin, ListView):
""" :args: <app_label> """
template_name = "mongonaut/app_list.html"
class DocumentListView(MongonautViewMixin, FormView):
""" :args: <app_label> <document_name>
TODO - Make a generic document fetcher method
"""
form_class = Form
success_url = '/'
template_name = "mongonaut/document_list.html"
permission = 'has_view_permission'
documents_per_page = 25
#def dispatch(self, *args, **kwargs):
# self.set_mongoadmin()
# self.set_permissions()
# return super(DocumentListView, self).dispatch(*args, **kwargs)
def get_qset(self, queryset, q):
if self.mongoadmin.search_fields and q:
params = {}
for field in self.mongoadmin.search_fields:
if field == 'id':
# check to make sure this is a valid ID, otherwise we just continue
if is_valid_object_id(q):
return queryset.filter(pk=q)
continue
search_key = "{field}__icontains".format(field=field)
params[search_key] = q
queryset = queryset.filter(**params)
return queryset
def get_queryset(self):
if hasattr(self, "queryset") and self.queryset:
return self.queryset
self.set_mongonaut_base()
self.set_mongoadmin()
self.document = getattr(self.models, self.document_name)
queryset = self.document.objects.all()
if self.mongoadmin.ordering:
queryset = queryset.order_by(*self.mongoadmin.ordering)
# search. move this to get_queryset
q = self.request.GET.get('q')
queryset = self.get_qset(queryset, q)
### Start pagination
### Note:
### Didn't use the Paginator in Django cause mongoengine querysets are
### not the same as Django ORM querysets and it broke.
# Make sure page request is an int. If not, deliver first page.
try:
self.page = int(self.request.GET.get('page', '1'))
except ValueError:
self.page = 1
obj_count = queryset.count()
self.total_pages = obj_count / self.documents_per_page + (1 if obj_count % self.documents_per_page else 0)
if self.page < 1:
self.page = 1
if self.page > self.total_pages:
self.page = self.total_pages
start = (self.page - 1) * self.documents_per_page
end = self.page * self.documents_per_page
queryset = queryset[start:end] if obj_count else queryset
self.queryset = queryset
return queryset
def get_initial(self):
self.query = self.get_queryset()
mongo_ids = {'mongo_id': [unicode(x.id) for x in self.query]}
return mongo_ids
def get_context_data(self, **kwargs):
context = super(DocumentListView, self).get_context_data(**kwargs)
context = self.set_permissions_in_context(context)
if not context['has_view_permission']:
return HttpResponseForbidden("You do not have permissions to view this content.")
context['object_list'] = self.get_queryset()
context['document'] = self.document
context['app_label'] = self.app_label
context['document_name'] = self.document_name
# pagination bits
context['page'] = self.page
context['documents_per_page'] = self.documents_per_page
if self.page > 1:
previous_page_number = self.page - 1
else:
previous_page_number = None
if self.page < self.total_pages:
next_page_number = self.page + 1
else:
next_page_number = None
context['previous_page_number'] = previous_page_number
context['has_previous_page'] = previous_page_number is not None
context['next_page_number'] = next_page_number
context['has_next_page'] = next_page_number is not None
context['total_pages'] = self.total_pages
# Part of upcoming list view form functionality
if self.queryset.count():
context['keys'] = ['id', ]
# Show those items for which we've got list_fields on the mongoadmin
for key in [x for x in self.mongoadmin.list_fields if x != 'id' and x in self.document._fields.keys()]:
# TODO - Figure out why this EmbeddedDocumentField and ListField breaks this view
# Note - This is the challenge part, right? :)
if isinstance(self.document._fields[key], EmbeddedDocumentField):
continue
if isinstance(self.document._fields[key], ListField):
continue
context['keys'].append(key)
if self.mongoadmin.search_fields:
context['search_field'] = True
return context
def post(self, request, *args, **kwargs):
# TODO - make sure to check the rights of the poster
#self.get_queryset() # TODO - write something that grabs the document class better
form_class = self.get_form_class()
form = self.get_form(form_class)
mongo_ids = self.get_initial()['mongo_id']
for form_mongo_id in form.data.getlist('mongo_id'):
for mongo_id in mongo_ids:
if form_mongo_id == mongo_id:
self.document.objects.get(pk=mongo_id).delete()
return self.form_invalid(form)
class DocumentDetailView(MongonautViewMixin, TemplateView):
""" :args: <app_label> <document_name> <id> """
template_name = "mongonaut/document_detail.html"
permission = 'has_view_permission'
def get_context_data(self, **kwargs):
context = super(DocumentDetailView, self).get_context_data(**kwargs)
self.set_mongoadmin()
context = self.set_permissions_in_context(context)
self.document_type = getattr(self.models, self.document_name)
self.ident = self.kwargs.get('id')
self.document = self.document_type.objects.get(pk=self.ident)
context['document'] = self.document
context['app_label'] = self.app_label
context['document_name'] = self.document_name
context['keys'] = ['id', ]
context['embedded_documents'] = []
context['list_fields'] = []
for key in sorted([x for x in self.document._fields.keys() if x != 'id']):
# TODO - Figure out why this EmbeddedDocumentField and ListField breaks this view
# Note - This is the challenge part, right? :)
if isinstance(self.document._fields[key], EmbeddedDocumentField):
context['embedded_documents'].append(key)
continue
if isinstance(self.document._fields[key], ListField):
context['list_fields'].append(key)
continue
context['keys'].append(key)
return context
class DocumentEditFormView(MongonautViewMixin, FormView, MongonautFormViewMixin):
""" :args: <app_label> <document_name> <id> """
template_name = "mongonaut/document_edit_form.html"
form_class = Form
success_url = '/'
permission = 'has_edit_permission'
def get_success_url(self):
self.set_mongonaut_base()
return reverse('document_detail_edit_form', kwargs={'app_label': self.app_label, 'document_name': self.document_name, 'id': self.kwargs.get('id')})
def get_context_data(self, **kwargs):
context = super(DocumentEditFormView, self).get_context_data(**kwargs)
self.set_mongoadmin()
context = self.set_permissions_in_context(context)
self.document_type = getattr(self.models, self.document_name)
self.ident = self.kwargs.get('id')
self.document = self.document_type.objects.get(pk=self.ident)
context['document'] = self.document
context['app_label'] = self.app_label
context['document_name'] = self.document_name
context['form_action'] = reverse('document_detail_edit_form', args=[self.kwargs.get('app_label'),
self.kwargs.get('document_name'),
self.kwargs.get('id')])
return context
def get_form(self, Form):
self.set_mongoadmin()
context = self.set_permissions_in_context({})
if not context['has_edit_permission']:
return HttpResponseForbidden("You do not have permissions to edit this content.")
self.document_type = getattr(self.models, self.document_name)
self.ident = self.kwargs.get('id')
try:
self.document = self.document_type.objects.get(pk=self.ident)
except self.document_type.DoesNotExist:
raise Http404
self.form = Form()
if self.request.method == 'POST':
self.form = self.process_post_form('Your changes have been saved.')
else:
self.form = MongoModelForm(model=self.document_type, instance=self.document).get_form()
return self.form
class DocumentAddFormView(MongonautViewMixin, FormView, MongonautFormViewMixin):
""" :args: <app_label> <document_name> <id> """
template_name = "mongonaut/document_add_form.html"
form_class = Form
success_url = '/'
permission = 'has_add_permission'
def get_success_url(self):
self.set_mongonaut_base()
return reverse('document_detail', kwargs={'app_label': self.app_label, 'document_name': self.document_name, 'id': str(self.new_document.id)})
def get_context_data(self, **kwargs):
""" TODO - possibly inherit this from DocumentEditFormView. This is same thing minus:
self.ident = self.kwargs.get('id')
self.document = self.document_type.objects.get(pk=self.ident)
"""
context = super(DocumentAddFormView, self).get_context_data(**kwargs)
self.set_mongoadmin()
context = self.set_permissions_in_context(context)
self.document_type = getattr(self.models, self.document_name)
context['app_label'] = self.app_label
context['document_name'] = self.document_name
context['form_action'] = reverse('document_detail_add_form', args=[self.kwargs.get('app_label'),
self.kwargs.get('document_name')])
return context
def get_form(self, Form):
self.set_mongonaut_base()
self.document_type = getattr(self.models, self.document_name)
self.form = Form()
if self.request.method == 'POST':
self.form = self.process_post_form('Your new document has been added and saved.')
else:
self.form = MongoModelForm(model=self.document_type).get_form()
return self.form
class DocumentDeleteView(DeletionMixin, MongonautViewMixin, TemplateView):
""" :args: <app_label> <document_name> <id>
TODO - implement a GET view for confirmation
"""
success_url = "/"
template_name = "mongonaut/document_delete.html"
def get_success_url(self):
self.set_mongonaut_base()
messages.add_message(self.request, messages.INFO, 'Your document has been deleted.')
return reverse('document_list', kwargs={'app_label': self.app_label, 'document_name': self.document_name})
def get_object(self):
self.set_mongoadmin()
self.document_type = getattr(self.models, self.document_name)
self.ident = self.kwargs.get('id')
self.document = self.document_type.objects.get(pk=self.ident)
return self.document
| mit | -7,187,152,953,296,645,000 | 37.478788 | 155 | 0.619704 | false |
antismash/websmash | websmash/__init__.py | 1 | 1572 | from flask import Flask, g
from flask_mail import Mail
from urllib.parse import urlparse
from redis import Redis
from redis.sentinel import Sentinel
app = Flask(__name__)
import websmash.default_settings
app.config.from_object(websmash.default_settings)
app.config.from_envvar('WEBSMASH_CONFIG', silent=True)
mail = Mail(app)
def get_db():
redis_store = getattr(g, '_database', None)
if redis_store is None:
if 'FAKE_DB' in app.config and app.config['FAKE_DB']:
from mockredis import mock_redis_client
redis_store = g._database = mock_redis_client(encoding='utf-8', decode_responses=True)
else:
if app.config['REDIS_URL'].startswith('redis://'):
redis_store = g._database = Redis.from_url(app.config['REDIS_URL'], encoding='utf-8',
decode_responses=True)
elif app.config['REDIS_URL'].startswith('sentinel://'):
parsed_url = urlparse(app.config['REDIS_URL'])
service = parsed_url.path.lstrip('/')
port = 26379
if ':' in parsed_url.netloc:
host, str_port = parsed_url.netloc.split(':')
port = int(str_port)
else:
host = parsed_url.netloc
sentinel = Sentinel([(host, port)], socket_timeout=0.1)
redis_store = sentinel.master_for(service, redis_class=Redis, socket_timeout=0.1)
return redis_store
import websmash.api
import websmash.error_handlers
| agpl-3.0 | -4,731,344,320,122,627,000 | 39.307692 | 101 | 0.590331 | false |
CospanDesign/nysa-tx1-pcie-platform | tx1_pcie/slave/wb_tx1_pcie/cocotb/dut_driver.py | 1 | 6796 | #PUT LICENCE HERE!
"""
wb_tx1_pcie Driver
"""
import sys
import os
import time
from array import array as Array
sys.path.append(os.path.join(os.path.dirname(__file__),
os.pardir))
from nysa.host.driver import driver
#Sub Module ID
#Use 'nysa devices' to get a list of different available devices
DEVICE_TYPE = "Experiment"
SDB_ABI_VERSION_MINOR = 0
SDB_VENDOR_ID = 0
try:
SDB_ABI_VERSION_MINOR = 0
SDB_VENDOR_ID = 0x800000000000C594
except SyntaxError:
pass
#Register Constants
CONTROL_ADDR = 0x00000000
STATUS_ADDR = 0x00000001
CONFIG_COMMAND = 0x00000002
CONFIG_STATUS = 0x00000003
CONFIG_DCOMMAND = 0x00000004
CONFIG_DCOMMAND2 = 0x00000005
CONFIG_DSTATUS = 0x00000006
CONFIG_LCOMMAND = 0x00000007
CONFIG_LSTATUS = 0x00000008
CONFIG_LINK_STATE = 0x00000009
RX_ELEC_IDLE = 0x0000000A
LTSSM_STATE = 0x0000000B
GTX_PLL_LOCK = 0x0000000C
TX_DIFF_CTR = 0x0000000D
STS_BIT_LINKUP = 0
STS_BIT_USR_RST = 1
STS_BIT_PCIE_RST_N = 2
STS_BIT_PHY_RDY_N = 3
STS_PLL_LOCKED = 4
STS_CLK_IN_STOPPED = 5
ZERO_BIT = 0  # control register bit 0 (assumed value) used by enable_control_0_bit below
class wb_tx1_pcieDriver(driver.Driver):
""" wb_tx1_pcie
Communication with a DutDriver wb_tx1_pcie Core
"""
@staticmethod
def get_abi_class():
return 0
@staticmethod
def get_abi_major():
return driver.get_device_id_from_name(DEVICE_TYPE)
@staticmethod
def get_abi_minor():
return SDB_ABI_VERSION_MINOR
@staticmethod
def get_vendor_id():
return SDB_VENDOR_ID
def __init__(self, nysa, urn, debug = False):
super(wb_tx1_pcieDriver, self).__init__(nysa, urn, debug)
def set_control(self, control):
self.write_register(CONTROL_ADDR, control)
def get_control(self):
return self.read_register(CONTROL_ADDR)
def set_tx_diff(self, value):
self.write_register(TX_DIFF_CTR, value)
def get_tx_diff(self):
return self.read_register(TX_DIFF_CTR)
def is_linkup(self):
return self.is_register_bit_set(STATUS_ADDR, STS_BIT_LINKUP)
def is_pcie_usr_rst(self):
return self.is_register_bit_set(STATUS_ADDR, STS_BIT_USR_RST)
def is_pcie_phy_rst(self):
return self.is_register_bit_set(STATUS_ADDR, STS_BIT_PCIE_RST_N)
def is_pll_locked(self):
return self.is_register_bit_set(STATUS_ADDR, STS_PLL_LOCKED)
def is_clk_in_stopped(self):
return self.is_register_bit_set(STATUS_ADDR, STS_CLK_IN_STOPPED)
def is_pcie_phy_ready(self):
return not self.is_register_bit_set(STATUS_ADDR, STS_BIT_PHY_RDY_N)
def get_ltssm_state(self):
state = self.read_register(LTSSM_STATE)
if state == 0x000 : return "Detect.Quiet"
elif state == 0x001 : return "Detect.Quiet.Gen2"
elif state == 0x002 : return "Detect.Active"
elif state == 0x003 : return "Detect.ActiveSecond"
elif state == 0x004 : return "Polling.Active"
elif state == 0x005 : return "Polling.Config"
elif state == 0x006 : return "Polling.Comp.Pre.Send.Eios"
elif state == 0x007 : return "Polling.Comp.Pre.Timeout"
elif state == 0x008 : return "Polling.Comp.Send.Pattern"
elif state == 0x009 : return "Polling.Comp.Post.Send.Eior"
elif state == 0x00A : return "Polling.Comp.Post.Timeout"
elif state == 0x00B : return "Cfg.Lwidth.St0"
elif state == 0x00C : return "Cfg.Lwidth.St1"
elif state == 0x00D : return "Cfg.LWidth.Ac0"
elif state == 0x00E : return "Cfg.Lwidth.Ac1"
elif state == 0x00F : return "Cfg.Lnum.Wait"
elif state == 0x0010 : return "Cfg.Lnum.Acpt"
elif state == 0x0011 : return "Cfg.Complete.1"
elif state == 0x0012 : return "Cfg.Complete.2"
elif state == 0x0013 : return "Cfg.Complete.4"
elif state == 0x0014 : return "Cfg.Complete.8"
elif state == 0x0015 : return "Cfg.Idle"
elif state == 0x0016 : return "L0"
elif state == 0x0017 : return "L1.Entry.0"
elif state == 0x0018 : return "L1.Entry.1"
elif state == 0x0019 : return "L1.Entry.2"
elif state == 0x001A : return "L1.Idle"
elif state == 0x001B : return "L1.Exit"
elif state == 0x001C : return "Rec.RcvLock"
elif state == 0x001D : return "Rec.RcvCfg"
elif state == 0x001E : return "Rec.Speed.0"
elif state == 0x001F : return "Rec.Speed.1"
elif state == 0x0020 : return "Rec.Idle"
elif state == 0x0021 : return "Hot.Rst"
elif state == 0x0022 : return "Disabled.Entry.0"
elif state == 0x0023 : return "Disabled.Entry.1"
elif state == 0x0024 : return "Disabled.Entry.2"
elif state == 0x0025 : return "Disabled.Idle"
elif state == 0x0026 : return "Dp.Cfg.Lwidth.St0"
elif state == 0x0027 : return "Dp.Cfg.Lwidth.St1"
elif state == 0x0028 : return "Dp.Cfg.Lwidth.St2"
elif state == 0x0029 : return "Dp.Cfg.Lwidth.Ac0"
elif state == 0x002A : return "Dp.Cfg.Lwidth.Ac1"
elif state == 0x002B : return "Dp.Cfg.Lwidth.Wait"
elif state == 0x002C : return "Dp.Cfg.Lwidth.Acpt"
elif state == 0x002D : return "To.2.Detect"
elif state == 0x002E : return "Lpbk.Entry.0"
elif state == 0x002F : return "Lpbk.Entry.1"
elif state == 0x0030 : return "Lpbk.Active.0"
elif state == 0x0031 : return "Lpbk.Exit0"
elif state == 0x0032 : return "Lpbk.Exit1"
elif state == 0x0033 : return "Lpbkm.Entry0"
else:
return "Unknown State: 0x%02X" % state
def get_gtx_pll_lock_reg(self):
return self.read_register(GTX_PLL_LOCK)
def enable_control_0_bit(self, enable):
self.enable_register_bit(CONTROL_ADDR, ZERO_BIT, enable)
def is_control_0_bit_set(self):
return self.is_register_bit_set(CONTROL_ADDR, ZERO_BIT)
def get_cfg_command(self):
return self.read_register(CONFIG_COMMAND)
def get_cfg_status(self):
return self.read_register(CONFIG_STATUS)
def get_cfg_dcommand(self):
return self.read_register(CONFIG_DCOMMAND)
def get_cfg_dcommand2(self):
return self.read_register(CONFIG_DCOMMAND2)
def get_cfg_dstatus(self):
return self.read_register(CONFIG_DSTATUS)
def get_cfg_lcommand(self):
return self.read_register(CONFIG_LCOMMAND)
def get_cfg_lstatus(self):
return self.read_register(CONFIG_LSTATUS)
def get_link_state(self):
return self.read_register(CONFIG_LINK_STATE)
def get_elec_idle(self):
return self.read_register(RX_ELEC_IDLE)
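# Example usage (an illustrative sketch; `nysa` and `urn` stand for an already
# initialised Nysa host instance and the URN discovered for this core):
#
#   driver = wb_tx1_pcieDriver(nysa, urn)
#   if driver.is_linkup():
#       print "LTSSM state: %s" % driver.get_ltssm_state()
#       print "Link status: 0x%04X" % driver.get_cfg_lstatus()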
| mit | -6,449,803,479,395,606,000 | 32.810945 | 75 | 0.617275 | false |
wmtprojectsteam/rivercontrolsystem | tests/test_solidstate_relay.py | 1 | 2034 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Solid-State-Relay test for the River System Control and Monitoring Software Version 0.9.2
# This file is part of the River System Control and Monitoring Software.
# Copyright (C) 2017-2018 Wimborne Model Town
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 or,
# at your option, any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import logging
import sys
import os
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
def run_standalone():
#Do required imports.
import Tools
from Tools import sensorobjects
from Tools.sensorobjects import Motor
print("Testing. Please stand by...")
#Create the motor object.
ssr = Motor("Motorey")
#Set the motor up.
ssr.set_pins(5, _input=False)
try:
time.sleep(3)
print("On")
ssr.enable()
time.sleep(15)
print("Off")
ssr.disable()
except BaseException as err:
#Ignore all errors. Generally bad practice :P
print("\nCaught Exception: ", err)
finally:
#Always clean up properly.
print("Cleaning up...")
#Reset GPIO pins.
GPIO.cleanup()
if __name__ == "__main__":
#Import here to prevent errors when generating documentation on non-RPi systems.
import RPi.GPIO as GPIO
#Set up a logger for the sensor objects.
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s: %(message)s', datefmt='%d/%m/%Y %I:%M:%S %p', level=logging.DEBUG)
logger = logging
run_standalone()
| gpl-3.0 | -7,720,412,627,526,882,000 | 28.057143 | 138 | 0.670108 | false |
avedaee/DIRAC | Core/Utilities/MySQL.py | 1 | 58931 | ########################################################################
# $HeadURL$
########################################################################
""" DIRAC Basic MySQL Class
It provides access to the basic MySQL methods in a multithread-safe mode
keeping used connections in a python Queue for further reuse.
    These are the provided methods:
__init__( host, user, passwd, name, [maxConnsInQueue=10] )
Initializes the Queue and tries to connect to the DB server,
using the _connect method.
"maxConnsInQueue" defines the size of the Queue of open connections
    that are kept for reuse. It also defines the maximum number of open
    connections available from the object.
    maxConnsInQueue = 0 (unlimited) is not supported.
_except( methodName, exception, errorMessage )
Helper method for exceptions: the "methodName" and the "errorMessage"
are printed with ERROR level, then the "exception" is printed (with
full description if it is a MySQL Exception) and S_ERROR is returned
with the errorMessage and the exception.
_connect()
Attempts connection to DB and sets the _connected flag to True upon success.
Returns S_OK or S_ERROR.
_query( cmd, [conn] )
Executes SQL command "cmd".
      Gets a connection from the Queue (or opens a new one if none is available);
      the used connection is put back into the Queue.
      If a connection to the DB is passed as second argument this connection
      is used and is not put in the Queue.
Returns S_OK with fetchall() out in Value or S_ERROR upon failure.
_update( cmd, [conn] )
Executes SQL command "cmd" and issue a commit
Gets a connection from the Queue (or open a new one if none is available),
the used connection is back into the Queue.
If a connection to the the DB is passed as second argument this connection
is used and is not in the Queue
Returns S_OK with number of updated registers in Value or S_ERROR upon failure.
_createTables( tableDict )
Create a new Table in the DB
_getConnection()
Gets a connection from the Queue (or open a new one if none is available)
Returns S_OK with connection in Value or S_ERROR
the calling method is responsible for closing this connection once it is no
longer needed.
  Some high level methods have been added to avoid the need to write SQL
  statements in most common cases. They should be used instead of the low level
  _insert, _update methods whenever possible.
buildCondition( self, condDict = None, older = None, newer = None,
timeStamp = None, orderAttribute = None, limit = False,
greater = None, smaller = None ):
Build SQL condition statement from provided condDict and other extra check on
a specified time stamp.
The conditions dictionary specifies for each attribute one or a List of possible
values
    greater and smaller are dictionaries in which the keys are the names of the fields
    that are required to be >= or < the corresponding value, respectively.
For compatibility with current usage it uses Exceptions to exit in case of
invalid arguments
insertFields( self, tableName, inFields = None, inValues = None, conn = None, inDict = None ):
Insert a new row in "tableName" assigning the values "inValues" to the
fields "inFields".
Alternatively inDict can be used
String type values will be appropriately escaped.
updateFields( self, tableName, updateFields = None, updateValues = None,
condDict = None,
limit = False, conn = None,
updateDict = None,
older = None, newer = None,
timeStamp = None, orderAttribute = None ):
Update "updateFields" from "tableName" with "updateValues".
updateDict alternative way to provide the updateFields and updateValues
N records can match the condition
return S_OK( number of updated rows )
if limit is not False, the given limit is set
String type values will be appropriately escaped.
deleteEntries( self, tableName,
condDict = None,
limit = False, conn = None,
older = None, newer = None,
timeStamp = None, orderAttribute = None ):
Delete rows from "tableName" with
N records can match the condition
if limit is not False, the given limit is set
String type values will be appropriately escaped, they can be single values or lists of values.
getFields( self, tableName, outFields = None,
condDict = None,
limit = False, conn = None,
older = None, newer = None,
timeStamp = None, orderAttribute = None ):
Select "outFields" from "tableName" with condDict
N records can match the condition
return S_OK( tuple(Field,Value) )
if limit is not False, the given limit is set
      String type values will be appropriately escaped; they can be single values or lists of
      values.
getCounters( self, table, attrList, condDict = None, older = None,
newer = None, timeStamp = None, connection = False ):
Count the number of records on each distinct combination of AttrList, selected
with condition defined by condDict and time stamps
getDistinctAttributeValues( self, table, attribute, condDict = None, older = None,
newer = None, timeStamp = None, connection = False ):
Get distinct values of a table attribute under specified conditions
"""
__RCSID__ = "$Id$"
from DIRAC import gLogger
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.DataStructures import MutableStruct
from DIRAC.Core.Utilities import Time
# Get rid of the annoying Deprecation warning of the current MySQLdb
# FIXME: compile a newer MySQLdb version
import warnings
with warnings.catch_warnings():
warnings.simplefilter( 'ignore', DeprecationWarning )
import MySQLdb
# This is for proper initialization of embeded server, it should only be called once
MySQLdb.server_init( ['--defaults-file=/opt/dirac/etc/my.cnf', '--datadir=/opt/mysql/db'], ['mysqld'] )
gInstancesCount = 0
gDebugFile = None
import collections
import time
import threading
from types import StringTypes, DictType, ListType, TupleType
MAXCONNECTRETRY = 10
def _checkQueueSize( maxQueueSize ):
"""
Helper to check maxQueueSize
"""
if maxQueueSize <= 0:
    raise Exception( 'MySQL.__init__: maxQueueSize must be positive' )
try:
maxQueueSize - 1
except Exception:
raise Exception( 'MySQL.__init__: wrong type for maxQueueSize' )
def _checkFields( inFields, inValues ):
"""
Helper to check match between inFields and inValues lengths
"""
if inFields == None and inValues == None:
return S_OK()
try:
assert len( inFields ) == len( inValues )
except:
return S_ERROR( 'Mismatch between inFields and inValues.' )
return S_OK()
def _quotedList( fieldList = None ):
"""
Quote a list of MySQL Field Names with "`"
Return a comma separated list of quoted Field Names
    To be used for Table and Field Names
"""
if fieldList == None:
return None
quotedFields = []
try:
for field in fieldList:
quotedFields.append( '`%s`' % field.replace( '`', '' ) )
except Exception:
return None
if not quotedFields:
return None
return ', '.join( quotedFields )
class MySQL:
"""
Basic multithreaded DIRAC MySQL Client Class
"""
__initialized = False
class ConnectionPool( object ):
"""
Management of connections per thread
"""
__connData = MutableStruct( 'ConnData', [ 'conn', 'dbName', 'last', 'intrans' ] )
def __init__( self, host, user, passwd, port = 3306, graceTime = 600 ):
self.__host = host
self.__user = user
self.__passwd = passwd
self.__port = port
self.__graceTime = graceTime
self.__spares = collections.deque()
self.__maxSpares = 10
self.__lastClean = 0
self.__assigned = {}
@property
def __thid( self ):
return threading.current_thread()
def __newConn( self ):
conn = MySQLdb.connect( host = self.__host,
port = self.__port,
user = self.__user,
passwd = self.__passwd )
self.__execute( conn, "SET AUTOCOMMIT=1" )
return conn
def __execute( self, conn, cmd ):
cursor = conn.cursor()
res = cursor.execute( cmd )
cursor.close()
return res
def get( self, dbName, retries = 10 ):
retries = max( 0, min( MAXCONNECTRETRY, retries ) )
self.clean()
result = self.__getWithRetry( dbName, retries, retries )
if not result[ 'OK' ]:
return result
return S_OK( result[ 'Value' ].conn )
def __getWithRetry( self, dbName, totalRetries = 10, retriesLeft = 10 ):
sleepTime = 5 * ( totalRetries - retriesLeft )
if sleepTime > 0:
time.sleep( sleepTime )
try:
connData, thid = self.__innerGet()
except MySQLdb.MySQLError, excp:
if retriesLeft >= 0:
return self.__getWithRetry( dbName, totalRetries, retriesLeft - 1 )
return S_ERROR( "Could not connect: %s" % excp )
if not connData.intrans and not self.__ping( connData.conn ):
try:
self.__assigned.pop( thid )
except KeyError:
pass
if retriesLeft >= 0:
return self.__getWithRetry( dbName, totalRetries, retriesLeft )
return S_ERROR( "Could not connect" )
if connData.dbName != dbName:
try:
connData.conn.select_db( dbName )
connData.dbName = dbName
except MySQLdb.MySQLError, excp:
try:
self.__assigned.pop( thid ).conn.close()
except Exception:
pass
if retriesLeft >= 0:
return self.__getWithRetry( dbName, totalRetries, retriesLeft - 1 )
return S_ERROR( "Could not select db %s: %s" % ( dbName, excp ) )
return S_OK( connData )
def __ping( self, conn ):
try:
conn.ping( True )
return True
except:
return False
def __innerGet( self ):
thid = self.__thid
now = time.time()
try:
data = self.__assigned[ thid ]
data.last = now
return data, thid
except KeyError:
pass
#Not cached
try:
connData = self.__spares.pop()
except IndexError:
connData = self.__connData( self.__newConn(), "", now, False )
self.__assigned[ thid ] = connData
return self.__assigned[ thid ], thid
def __pop( self, thid ):
try:
connData = self.__assigned.pop( thid )
except KeyError:
return
if not connData.intrans and len( self.__spares ) < self.__maxSpares:
self.__spares.append( connData )
else:
connData.conn.close()
def clean( self, now = False ):
if not now:
now = time.time()
self.__lastClean = now
for thid in list( self.__assigned ):
if not thid.isAlive():
self.__pop( thid )
continue
try:
data = self.__assigned[ thid ]
except KeyError:
continue
if now - data.last > self.__graceTime:
self.__pop( thid )
def transactionStart( self, dbName ):
print "TRANS START"
result = self.__getWithRetry( dbName )
if not result[ 'OK' ]:
return result
connData = result[ 'Value' ]
try:
if connData.intrans:
raise RuntimeError( "Staring a MySQL transaction inside another one" )
self.__execute( connData.conn, "SET AUTOCOMMIT=0" )
self.__execute( connData.conn, "START TRANSACTION WITH CONSISTENT SNAPSHOT" )
connData.intrans = True
return S_OK()
except MySQLdb.MySQLError, excp:
return S_ERROR( "Could not begin transaction: %s" % excp )
def transactionCommit( self, dbName ):
print "TRANS COMMIT"
return self.__endTransaction( dbName, True )
def transactionRollback( self, dbName ):
print "TRANS ROLLBACK"
return self.__endTransaction( dbName, False )
def __endTransaction( self, dbName, commit ):
result = self.__getWithRetry( dbName )
if not result[ 'OK' ]:
return result
connData = result[ 'Value' ]
try:
if not connData.intrans:
gLogger.warn( "MySQL connection has reconnected. Transaction may be inconsistent" )
if commit:
result = connData.conn.commit()
else:
result = connData.conn.rollback()
self.__execute( connData.conn, "SET AUTOCOMMIT=1" )
connData.conn.commit()
connData.intrans = False
return S_OK( result )
except MySQLdb.MySQLError, excp:
return S_ERROR( "Could not end transaction: %s" % excp )
__connectionPools = {}
def __init__( self, hostName, userName, passwd, dbName, port = 3306, maxQueueSize = 3, debug = False ):
"""
set MySQL connection parameters and try to connect
"""
global gInstancesCount, gDebugFile
gInstancesCount += 1
self._connected = False
if 'log' not in dir( self ):
self.log = gLogger.getSubLogger( 'MySQL' )
self.logger = self.log
    # let the derived class decide what to do if it is not 1
self._threadsafe = MySQLdb.thread_safe()
self.log.debug( 'thread_safe = %s' % self._threadsafe )
_checkQueueSize( maxQueueSize )
self.__hostName = str( hostName )
self.__userName = str( userName )
self.__passwd = str( passwd )
self.__dbName = str( dbName )
self.__port = port
cKey = ( self.__hostName, self.__userName, self.__passwd, self.__port )
if cKey not in MySQL.__connectionPools:
MySQL.__connectionPools[ cKey ] = MySQL.ConnectionPool( *cKey )
self.__connectionPool = MySQL.__connectionPools[ cKey ]
self.__initialized = True
result = self._connect()
if not result[ 'OK' ]:
gLogger.error( "Cannot connect to to DB: %s" % result[ 'Message' ] )
if debug:
try:
gDebugFile = open( "%s.debug.log" % self.__dbName, "w" )
except IOError:
pass
def __del__( self ):
global gInstancesCount
try:
gInstancesCount -= 1
except Exception:
pass
def _except( self, methodName, x, err ):
"""
print MySQL error or exception
return S_ERROR with Exception
"""
try:
raise x
except MySQLdb.Error, e:
self.log.debug( '%s: %s' % ( methodName, err ),
'%d: %s' % ( e.args[0], e.args[1] ) )
return S_ERROR( '%s: ( %d: %s )' % ( err, e.args[0], e.args[1] ) )
except Exception, e:
self.log.debug( '%s: %s' % ( methodName, err ), str( e ) )
return S_ERROR( '%s: (%s)' % ( err, str( e ) ) )
def __escapeString( self, myString ):
"""
To be used for escaping any MySQL string before passing it to the DB
this should prevent passing non-MySQL accepted characters to the DB
It also includes quotation marks " around the given string
"""
retDict = self.__getConnection()
if not retDict['OK']:
return retDict
connection = retDict['Value']
specialValues = ( 'UTC_TIMESTAMP', 'TIMESTAMPADD', 'TIMESTAMPDIFF' )
try:
myString = str( myString )
except ValueError:
return S_ERROR( "Cannot escape value!" )
try:
for sV in specialValues:
if myString.find( sV ) == 0:
return S_OK( myString )
escape_string = connection.escape_string( str( myString ) )
self.log.debug( '__escape_string: returns', '"%s"' % escape_string )
return S_OK( '"%s"' % escape_string )
except Exception, x:
self.log.debug( '__escape_string: Could not escape string', '"%s"' % myString )
return self._except( '__escape_string', x, 'Could not escape string' )
def __checkTable( self, tableName, force = False ):
table = _quotedList( [tableName] )
if not table:
return S_ERROR( 'Invalid tableName argument' )
cmd = 'SHOW TABLES'
retDict = self._query( cmd, debug = True )
if not retDict['OK']:
return retDict
if ( tableName, ) in retDict['Value']:
if not force:
# the requested exist and table creation is not force, return with error
return S_ERROR( 'Requested table %s already exists' % tableName )
else:
cmd = 'DROP TABLE %s' % table
retDict = self._update( cmd, debug = True )
if not retDict['OK']:
return retDict
return S_OK()
def _escapeString( self, myString, conn = None ):
"""
Wrapper around the internal method __escapeString
"""
self.log.debug( '_escapeString:', '"%s"' % str( myString ) )
return self.__escapeString( myString )
def _escapeValues( self, inValues = None ):
"""
Escapes all strings in the list of values provided
"""
self.log.debug( '_escapeValues:', inValues )
inEscapeValues = []
if not inValues:
return S_OK( inEscapeValues )
for value in inValues:
if type( value ) in StringTypes:
retDict = self.__escapeString( value )
if not retDict['OK']:
return retDict
inEscapeValues.append( retDict['Value'] )
elif type( value ) == TupleType or type( value ) == ListType:
tupleValues = []
for v in list( value ):
retDict = self.__escapeString( v )
if not retDict['OK']:
return retDict
tupleValues.append( retDict['Value'] )
inEscapeValues.append( '(' + ', '.join( tupleValues ) + ')' )
else:
retDict = self.__escapeString( str( value ) )
if not retDict['OK']:
return retDict
inEscapeValues.append( retDict['Value'] )
return S_OK( inEscapeValues )
def _connect( self ):
"""
open connection to MySQL DB and put Connection into Queue
set connected flag to True and return S_OK
return S_ERROR upon failure
"""
if not self.__initialized:
error = 'DB not properly initialized'
gLogger.error( error )
return S_ERROR( error )
self.log.debug( '_connect:', self._connected )
if self._connected:
return S_OK()
self.log.debug( '_connect: Attempting to access DB',
'[%s@%s] by user %s/%s.' %
( self.__dbName, self.__hostName, self.__userName, self.__passwd ) )
try:
self.log.verbose( '_connect: Connected.' )
self._connected = True
return S_OK()
except Exception, x:
return self._except( '_connect', x, 'Could not connect to DB.' )
def _query( self, cmd, conn = None, debug = False ):
"""
execute MySQL query command
return S_OK structure with fetchall result as tuple
it returns an empty tuple if no matching rows are found
return S_ERROR upon error
"""
if debug:
self.logger.debug( '_query:', cmd )
else:
if self.logger._minLevel == self.logger._logLevels.getLevelValue( 'DEBUG' ):
self.logger.verbose( '_query:', cmd )
else:
self.logger.verbose( '_query:', cmd[:min( len( cmd ) , 512 )] )
if gDebugFile:
start = time.time()
retDict = self.__getConnection()
if not retDict['OK']:
return retDict
connection = retDict[ 'Value' ]
try:
cursor = connection.cursor()
if cursor.execute( cmd ):
res = cursor.fetchall()
else:
res = ()
# Log the result limiting it to just 10 records
if len( res ) <= 10:
if debug:
self.logger.debug( '_query: returns', res )
else:
self.logger.verbose( '_query: returns', res )
else:
if debug:
self.logger.debug( '_query: Total %d records returned' % len( res ) )
self.logger.debug( '_query: %s ...' % str( res[:10] ) )
else:
self.logger.verbose( '_query: Total %d records returned' % len( res ) )
self.logger.verbose( '_query: %s ...' % str( res[:10] ) )
retDict = S_OK( res )
except Exception , x:
self.log.warn( '_query:', cmd )
retDict = self._except( '_query', x, 'Execution failed.' )
try:
cursor.close()
except Exception:
pass
if gDebugFile:
print >> gDebugFile, time.time() - start, cmd.replace( '\n', '' )
gDebugFile.flush()
return retDict
def _update( self, cmd, conn = None, debug = False ):
""" execute MySQL update command
return S_OK with number of updated registers upon success
return S_ERROR upon error
"""
if debug:
self.logger.debug( '_update:', cmd )
else:
if self.logger._minLevel == self.logger._logLevels.getLevelValue( 'DEBUG' ):
self.logger.verbose( '_update:', cmd )
else:
self.logger.verbose( '_update:', cmd[:min( len( cmd ) , 512 )] )
if gDebugFile:
start = time.time()
retDict = self.__getConnection( conn = conn )
if not retDict['OK']:
return retDict
connection = retDict['Value']
try:
cursor = connection.cursor()
res = cursor.execute( cmd )
# connection.commit()
if debug:
self.log.debug( '_update:', res )
else:
self.log.verbose( '_update:', res )
retDict = S_OK( res )
if cursor.lastrowid:
retDict[ 'lastRowId' ] = cursor.lastrowid
except Exception, x:
self.log.warn( '_update: %s: %s' % ( cmd, str( x ) ) )
retDict = self._except( '_update', x, 'Execution failed.' )
try:
cursor.close()
except Exception:
pass
if gDebugFile:
print >> gDebugFile, time.time() - start, cmd.replace( '\n', '' )
gDebugFile.flush()
return retDict
def _transaction( self, cmdList, conn = None ):
""" dummy transaction support
:param self: self reference
:param list cmdList: list of queries to be executed within the transaction
:param MySQLDB.Connection conn: connection
:return: S_OK( [ ( cmd1, ret1 ), ... ] ) or S_ERROR
"""
if type( cmdList ) != ListType:
return S_ERROR( "_transaction: wrong type (%s) for cmdList" % type( cmdList ) )
# # get connection
connection = conn
if not connection:
retDict = self.__getConnection()
if not retDict['OK']:
return retDict
connection = retDict[ 'Value' ]
# # list with cmds and their results
cmdRet = []
try:
cursor = connection.cursor()
for cmd in cmdList:
cmdRet.append( ( cmd, cursor.execute( cmd ) ) )
connection.commit()
except Exception, error:
      self.logger.exception( error )
# # rollback, put back connection to the pool
connection.rollback()
return S_ERROR( error )
# # close cursor, put back connection to the pool
cursor.close()
return S_OK( cmdRet )
def _createViews( self, viewsDict, force = False ):
""" create view based on query
:param dict viewDict: { 'ViewName': "Fields" : { "`a`": `tblA.a`, "`sumB`" : "SUM(`tblB.b`)" }
"SelectFrom" : "tblA join tblB on tblA.id = tblB.id",
"Clauses" : [ "`tblA.a` > 10", "`tblB.Status` = 'foo'" ] ## WILL USE AND CLAUSE
"GroupBy": [ "`a`" ],
"OrderBy": [ "`b` DESC" ] }
"""
if force:
gLogger.debug( viewsDict )
for viewName, viewDict in viewsDict.items():
viewQuery = [ "CREATE OR REPLACE VIEW `%s`.`%s` AS" % ( self.__dbName, viewName ) ]
columns = ",".join( [ "%s AS %s" % ( colDef, colName )
for colName, colDef in viewDict.get( "Fields", {} ).items() ] )
tables = viewDict.get( "SelectFrom", "" )
if columns and tables:
viewQuery.append( "SELECT %s FROM %s" % ( columns, tables ) )
where = " AND ".join( viewDict.get( "Clauses", [] ) )
if where:
viewQuery.append( "WHERE %s" % where )
groupBy = ",".join( viewDict.get( "GroupBy", [] ) )
if groupBy:
viewQuery.append( "GROUP BY %s" % groupBy )
orderBy = ",".join( viewDict.get( "OrderBy", [] ) )
if orderBy:
viewQuery.append( "ORDER BY %s" % orderBy )
viewQuery.append( ";" )
viewQuery = " ".join( viewQuery )
self.log.debug( "`%s` VIEW QUERY IS: %s" % ( viewName, viewQuery ) )
createView = self._query( viewQuery )
if not createView["OK"]:
gLogger.error( createView["Message"] )
return createView
return S_OK()
def _createTables( self, tableDict, force = False, okIfTableExists = True ):
"""
tableDict:
tableName: { 'Fields' : { 'Field': 'Description' },
'ForeignKeys': {'Field': 'Table.key' },
'PrimaryKey': 'Id',
'Indexes': { 'Index': [] },
'UniqueIndexes': { 'Index': [] },
'Engine': 'InnoDB' }
only 'Fields' is a mandatory key.
Creates a new Table for each key in tableDict, "tableName" in the DB with
the provided description.
It allows to create:
- flat tables if no "ForeignKeys" key defined.
- tables with foreign keys to auxiliary tables holding the values
of some of the fields
Arguments:
tableDict: dictionary of dictionary with description of tables to be created.
Only "Fields" is a mandatory key in the table description.
"Fields": Dictionary with Field names and description of the fields
"ForeignKeys": Dictionary with Field names and name of auxiliary tables.
The auxiliary tables must be defined in tableDict.
"PrimaryKey": Name of PRIMARY KEY for the table (if exist).
"Indexes": Dictionary with definition of indexes, the value for each
index is the list of fields to be indexed.
"UniqueIndexes": Dictionary with definition of indexes, the value for each
index is the list of fields to be indexed. This indexes will declared
unique.
"Engine": use the given DB engine, InnoDB is the default if not present.
force:
if True, requested tables are DROP if they exist.
if False (default), tables are not overwritten
okIfTableExists:
if True (default), returns S_OK if table exists
if False, returns S_ERROR if table exists
"""
# First check consistency of request
if type( tableDict ) != DictType:
return S_ERROR( 'Argument is not a dictionary: %s( %s )'
% ( type( tableDict ), tableDict ) )
tableList = tableDict.keys()
if len( tableList ) == 0:
return S_OK( 0 )
for table in tableList:
thisTable = tableDict[table]
# Check if Table is properly described with a dictionary
if type( thisTable ) != DictType:
return S_ERROR( 'Table description is not a dictionary: %s( %s )'
% ( type( thisTable ), thisTable ) )
if not 'Fields' in thisTable:
return S_ERROR( 'Missing `Fields` key in `%s` table dictionary' % table )
tableCreationList = [[]]
auxiliaryTableList = []
i = 0
extracted = True
while tableList and extracted:
# iterate extracting tables from list if they only depend on
# already extracted tables.
extracted = False
auxiliaryTableList += tableCreationList[i]
i += 1
tableCreationList.append( [] )
for table in list( tableList ):
toBeExtracted = True
thisTable = tableDict[table]
if 'ForeignKeys' in thisTable:
thisKeys = thisTable['ForeignKeys']
for key, auxTable in thisKeys.items():
forTable = auxTable.split( '.' )[0]
forKey = key
if forTable != auxTable:
forKey = auxTable.split( '.' )[1]
if forTable not in auxiliaryTableList:
toBeExtracted = False
break
if not key in thisTable['Fields']:
return S_ERROR( 'ForeignKey `%s` -> `%s` not defined in Primary table `%s`.'
% ( key, forKey, table ) )
if not forKey in tableDict[forTable]['Fields']:
return S_ERROR( 'ForeignKey `%s` -> `%s` not defined in Auxiliary table `%s`.'
% ( key, forKey, forTable ) )
if toBeExtracted:
self.log.debug( 'Table %s ready to be created' % table )
extracted = True
tableList.remove( table )
tableCreationList[i].append( table )
if tableList:
return S_ERROR( 'Recursive Foreign Keys in %s' % ', '.join( tableList ) )
createdTablesList = []
for tableList in tableCreationList:
for table in tableList:
# Check if Table exists
retDict = self.__checkTable( table, force = force )
if not retDict['OK']:
          # __checkTable reports 'Requested table <name> already exists' when the table is present
          if okIfTableExists and retDict['Message'].endswith( 'already exists' ):
            continue
return retDict
thisTable = tableDict[table]
cmdList = []
for field in thisTable['Fields'].keys():
cmdList.append( '`%s` %s' % ( field, thisTable['Fields'][field] ) )
if thisTable.has_key( 'PrimaryKey' ):
if type( thisTable['PrimaryKey'] ) in StringTypes:
cmdList.append( 'PRIMARY KEY ( `%s` )' % thisTable['PrimaryKey'] )
else:
cmdList.append( 'PRIMARY KEY ( %s )' % ", ".join( [ "`%s`" % str( f ) for f in thisTable['PrimaryKey'] ] ) )
if thisTable.has_key( 'Indexes' ):
indexDict = thisTable['Indexes']
for index in indexDict:
indexedFields = '`, `'.join( indexDict[index] )
cmdList.append( 'INDEX `%s` ( `%s` )' % ( index, indexedFields ) )
if thisTable.has_key( 'UniqueIndexes' ):
indexDict = thisTable['UniqueIndexes']
for index in indexDict:
indexedFields = '`, `'.join( indexDict[index] )
cmdList.append( 'UNIQUE INDEX `%s` ( `%s` )' % ( index, indexedFields ) )
if 'ForeignKeys' in thisTable:
thisKeys = thisTable['ForeignKeys']
for key, auxTable in thisKeys.items():
forTable = auxTable.split( '.' )[0]
forKey = key
if forTable != auxTable:
forKey = auxTable.split( '.' )[1]
# cmdList.append( '`%s` %s' % ( forTable, tableDict[forTable]['Fields'][forKey] )
cmdList.append( 'FOREIGN KEY ( `%s` ) REFERENCES `%s` ( `%s` )'
' ON DELETE RESTRICT' % ( key, forTable, forKey ) )
if thisTable.has_key( 'Engine' ):
engine = thisTable['Engine']
else:
engine = 'InnoDB'
cmd = 'CREATE TABLE `%s` (\n%s\n) ENGINE=%s' % (
table, ',\n'.join( cmdList ), engine )
retDict = self._update( cmd, debug = True )
if not retDict['OK']:
return retDict
self.log.debug( 'Table %s created' % table )
createdTablesList.append( table )
return S_OK( createdTablesList )
def _getFields( self, tableName, outFields = None,
inFields = None, inValues = None,
limit = False, conn = None,
older = None, newer = None,
timeStamp = None, orderAttribute = None ):
"""
Wrapper to the new method for backward compatibility
"""
self.log.warn( '_getFields:', 'deprecation warning, use getFields methods instead of _getFields.' )
retDict = _checkFields( inFields, inValues )
if not retDict['OK']:
self.log.warn( '_getFields:', retDict['Message'] )
return retDict
condDict = {}
if inFields != None:
try:
condDict.update( [ ( inFields[k], inValues[k] ) for k in range( len( inFields ) )] )
except Exception, x:
return S_ERROR( x )
return self.getFields( tableName, outFields, condDict, limit, conn, older, newer, timeStamp, orderAttribute )
def _insert( self, tableName, inFields = None, inValues = None, conn = None ):
"""
Wrapper to the new method for backward compatibility
"""
self.log.warn( '_insert:', 'deprecation warning, use insertFields methods instead of _insert.' )
return self.insertFields( tableName, inFields, inValues, conn )
def _to_value( self, param ):
"""
Convert to string
"""
return str( param[0] )
def _to_string( self, param ):
"""
"""
return param[0].tostring()
def _getConnection( self ):
"""
Return a new connection to the DB
It uses the private method __getConnection
"""
self.log.debug( '_getConnection:' )
retDict = self.__getConnection( trial = 0 )
return retDict
def __getConnection( self, conn = None, trial = 0 ):
"""
    Return a new connection to the DB.
    If conn is provided then just return it.
    Otherwise try the Queue; if it is empty, add a new connection to the Queue and retry.
    It will retry MAXCONNECTRETRY times to open a new connection and will return
    an error if it fails.
"""
self.log.debug( '__getConnection:' )
if not self.__initialized:
error = 'DB not properly initialized'
gLogger.error( error )
return S_ERROR( error )
return self.__connectionPool.get( self.__dbName )
########################################################################################
#
# Transaction functions
#
########################################################################################
def transactionStart( self ):
return self.__connectionPool.transactionStart( self.__dbName )
def transactionCommit( self ):
return self.__connectionPool.transactionCommit( self.__dbName )
def transactionRollback( self ):
return self.__connectionPool.transactionRollback( self.__dbName )
@property
def transaction( self ):
""" Transaction guard """
class TransactionGuard( object ):
def __init__( self, db ):
self.__db = db
self.__ok = False
def __enter__( self ):
self.__db.transactionStart()
def commitWard( *args ):
self.__ok = True
return args
return commitWard
def __exit__( self, exType, exValue, traceback ):
if exValue or not self.__ok:
self.__db.transactionRollback()
else:
self.__db.transactionCommit()
return TransactionGuard( self )
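  # Illustrative usage of the transaction guard above (a sketch added for
  # clarity, not part of the original DIRAC documentation; the table and
  # field names are made up):
  #
  #   with db.transaction as commitWard:
  #     db.insertFields( 'TestTable', ['Name'], ['foo'] )
  #     commitWard()
  #
  # Leaving the block without calling commitWard(), or raising an exception
  # inside it, triggers transactionRollback(); otherwise __exit__ commits.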
########################################################################################
#
# Utility functions
#
########################################################################################
def countEntries( self, table, condDict, older = None, newer = None, timeStamp = None, connection = False,
greater = None, smaller = None ):
"""
    Count the number of entries with the given conditions
"""
table = _quotedList( [table] )
if not table:
error = 'Invalid table argument'
self.log.debug( 'countEntries:', error )
return S_ERROR( error )
try:
cond = self.buildCondition( condDict = condDict, older = older, newer = newer, timeStamp = timeStamp,
                                  greater = greater, smaller = smaller )
except Exception, x:
return S_ERROR( x )
cmd = 'SELECT COUNT(*) FROM %s %s' % ( table, cond )
res = self._query( cmd , connection, debug = True )
if not res['OK']:
return res
return S_OK( res['Value'][0][0] )
########################################################################################
def getCounters( self, table, attrList, condDict, older = None, newer = None, timeStamp = None, connection = False,
greater = None, smaller = None ):
"""
Count the number of records on each distinct combination of AttrList, selected
with condition defined by condDict and time stamps
"""
table = _quotedList( [table] )
if not table:
error = 'Invalid table argument'
self.log.debug( 'getCounters:', error )
return S_ERROR( error )
attrNames = _quotedList( attrList )
if attrNames == None:
error = 'Invalid updateFields argument'
self.log.debug( 'getCounters:', error )
return S_ERROR( error )
try:
cond = self.buildCondition( condDict = condDict, older = older, newer = newer, timeStamp = timeStamp,
                                  greater = greater, smaller = smaller )
except Exception, x:
return S_ERROR( x )
cmd = 'SELECT %s, COUNT(*) FROM %s %s GROUP BY %s ORDER BY %s' % ( attrNames, table, cond, attrNames, attrNames )
res = self._query( cmd , connection, debug = True )
if not res['OK']:
return res
resultList = []
for raw in res['Value']:
attrDict = {}
for i in range( len( attrList ) ):
attrDict[attrList[i]] = raw[i]
item = ( attrDict, raw[len( attrList )] )
resultList.append( item )
return S_OK( resultList )
#########################################################################################
def getDistinctAttributeValues( self, table, attribute, condDict = None, older = None,
newer = None, timeStamp = None, connection = False,
greater = None, smaller = None ):
"""
Get distinct values of a table attribute under specified conditions
"""
table = _quotedList( [table] )
if not table:
error = 'Invalid table argument'
self.log.debug( 'getDistinctAttributeValues:', error )
return S_ERROR( error )
attributeName = _quotedList( [attribute] )
if not attributeName:
error = 'Invalid attribute argument'
self.log.debug( 'getDistinctAttributeValues:', error )
return S_ERROR( error )
try:
cond = self.buildCondition( condDict = condDict, older = older, newer = newer, timeStamp = timeStamp,
                                  greater = greater, smaller = smaller )
except Exception, x:
return S_ERROR( x )
cmd = 'SELECT DISTINCT( %s ) FROM %s %s ORDER BY %s' % ( attributeName, table, cond, attributeName )
res = self._query( cmd, connection, debug = True )
if not res['OK']:
return res
attr_list = [ x[0] for x in res['Value'] ]
return S_OK( attr_list )
#############################################################################
def buildCondition( self, condDict = None, older = None, newer = None,
timeStamp = None, orderAttribute = None, limit = False,
greater = None, smaller = None, offset = None ):
""" Build SQL condition statement from provided condDict and other extra check on
a specified time stamp.
The conditions dictionary specifies for each attribute one or a List of possible
values
greater and smaller are dictionaries in which the keys are the names of the fields,
that are requested to be >= or < than the corresponding value.
For compatibility with current usage it uses Exceptions to exit in case of
invalid arguments
"""
condition = ''
conjunction = "WHERE"
if condDict != None:
for aName, attrValue in condDict.items():
if type( aName ) in StringTypes:
attrName = _quotedList( [aName] )
        elif type( aName ) == TupleType:
          attrName = '('+_quotedList( list( aName ) )+')'
        else:
          attrName = None
if not attrName:
error = 'Invalid condDict argument'
self.log.warn( 'buildCondition:', error )
raise Exception( error )
if type( attrValue ) == ListType:
retDict = self._escapeValues( attrValue )
if not retDict['OK']:
self.log.warn( 'buildCondition:', retDict['Message'] )
raise Exception( retDict['Message'] )
else:
escapeInValues = retDict['Value']
multiValue = ', '.join( escapeInValues )
condition = ' %s %s %s IN ( %s )' % ( condition,
conjunction,
attrName,
multiValue )
conjunction = "AND"
else:
retDict = self._escapeValues( [ attrValue ] )
if not retDict['OK']:
self.log.warn( 'buildCondition:', retDict['Message'] )
raise Exception( retDict['Message'] )
else:
escapeInValue = retDict['Value'][0]
condition = ' %s %s %s = %s' % ( condition,
conjunction,
attrName,
escapeInValue )
conjunction = "AND"
if timeStamp:
timeStamp = _quotedList( [timeStamp] )
if not timeStamp:
error = 'Invalid timeStamp argument'
self.log.warn( 'buildCondition:', error )
raise Exception( error )
if newer:
retDict = self._escapeValues( [ newer ] )
if not retDict['OK']:
self.log.warn( 'buildCondition:', retDict['Message'] )
raise Exception( retDict['Message'] )
else:
escapeInValue = retDict['Value'][0]
condition = ' %s %s %s >= %s' % ( condition,
conjunction,
timeStamp,
escapeInValue )
conjunction = "AND"
if older:
retDict = self._escapeValues( [ older ] )
if not retDict['OK']:
self.log.warn( 'buildCondition:', retDict['Message'] )
raise Exception( retDict['Message'] )
else:
escapeInValue = retDict['Value'][0]
condition = ' %s %s %s < %s' % ( condition,
conjunction,
timeStamp,
escapeInValue )
if type( greater ) == DictType:
for attrName, attrValue in greater.items():
attrName = _quotedList( [attrName] )
if not attrName:
error = 'Invalid greater argument'
self.log.warn( 'buildCondition:', error )
raise Exception( error )
retDict = self._escapeValues( [ attrValue ] )
if not retDict['OK']:
self.log.warn( 'buildCondition:', retDict['Message'] )
raise Exception( retDict['Message'] )
else:
escapeInValue = retDict['Value'][0]
condition = ' %s %s %s >= %s' % ( condition,
conjunction,
attrName,
escapeInValue )
conjunction = "AND"
if type( smaller ) == DictType:
for attrName, attrValue in smaller.items():
attrName = _quotedList( [attrName] )
if not attrName:
error = 'Invalid smaller argument'
self.log.warn( 'buildCondition:', error )
raise Exception( error )
retDict = self._escapeValues( [ attrValue ] )
if not retDict['OK']:
self.log.warn( 'buildCondition:', retDict['Message'] )
raise Exception( retDict['Message'] )
else:
escapeInValue = retDict['Value'][0]
condition = ' %s %s %s < %s' % ( condition,
conjunction,
attrName,
escapeInValue )
conjunction = "AND"
orderList = []
orderAttrList = orderAttribute
if type( orderAttrList ) != ListType:
orderAttrList = [ orderAttribute ]
for orderAttr in orderAttrList:
if orderAttr == None:
continue
if type( orderAttr ) not in StringTypes:
error = 'Invalid orderAttribute argument'
self.log.warn( 'buildCondition:', error )
raise Exception( error )
orderField = _quotedList( orderAttr.split( ':' )[:1] )
if not orderField:
error = 'Invalid orderAttribute argument'
self.log.warn( 'buildCondition:', error )
raise Exception( error )
if len( orderAttr.split( ':' ) ) == 2:
orderType = orderAttr.split( ':' )[1].upper()
if orderType in [ 'ASC', 'DESC']:
orderList.append( '%s %s' % ( orderField, orderType ) )
else:
error = 'Invalid orderAttribute argument'
self.log.warn( 'buildCondition:', error )
raise Exception( error )
else:
orderList.append( orderAttr )
if orderList:
condition = "%s ORDER BY %s" % ( condition, ', '.join( orderList ) )
if limit:
if offset:
condition = "%s LIMIT %d OFFSET %d" % ( condition, limit, offset )
else:
condition = "%s LIMIT %d" % ( condition, limit )
return condition
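  # Example of the SQL fragment buildCondition produces (an illustrative
  # sketch; 'Status' and 'LastUpdate' are hypothetical field names and the
  # exact quoting comes from _escapeValues):
  #
  #   self.buildCondition( condDict = { 'Status': [ 'Done', 'Failed' ] },
  #                        older = '2015-01-01 00:00:00', timeStamp = 'LastUpdate',
  #                        orderAttribute = 'LastUpdate:DESC', limit = 10 )
  #
  # returns roughly:
  #
  #   WHERE `Status` IN ( 'Done', 'Failed' ) AND `LastUpdate` < '2015-01-01 00:00:00'
  #   ORDER BY `LastUpdate` DESC LIMIT 10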
#############################################################################
def getFields( self, tableName, outFields = None,
condDict = None,
limit = False, conn = None,
older = None, newer = None,
timeStamp = None, orderAttribute = None,
greater = None, smaller = None ):
"""
Select "outFields" from "tableName" with condDict
N records can match the condition
return S_OK( tuple(Field,Value) )
if outFields == None all fields in "tableName" are returned
if limit is not False, the given limit is set
inValues are properly escaped using the _escape_string method, they can be single values or lists of values.
"""
table = _quotedList( [tableName] )
if not table:
error = 'Invalid tableName argument'
self.log.warn( 'getFields:', error )
return S_ERROR( error )
quotedOutFields = '*'
if outFields:
quotedOutFields = _quotedList( outFields )
if quotedOutFields == None:
error = 'Invalid outFields arguments'
self.log.warn( 'getFields:', error )
return S_ERROR( error )
self.log.verbose( 'getFields:', 'selecting fields %s from table %s.' %
( quotedOutFields, table ) )
if condDict == None:
condDict = {}
try:
try:
mylimit = limit[0]
myoffset = limit[1]
      except ( TypeError, IndexError ):
mylimit = limit
myoffset = None
condition = self.buildCondition( condDict = condDict, older = older, newer = newer,
timeStamp = timeStamp, orderAttribute = orderAttribute, limit = mylimit,
                                       greater = greater, smaller = smaller, offset = myoffset )
except Exception, x:
return S_ERROR( x )
return self._query( 'SELECT %s FROM %s %s' %
( quotedOutFields, table, condition ), conn, debug = True )
#############################################################################
def deleteEntries( self, tableName,
condDict = None,
limit = False, conn = None,
older = None, newer = None,
timeStamp = None, orderAttribute = None,
greater = None, smaller = None ):
"""
Delete rows from "tableName" with
N records can match the condition
if limit is not False, the given limit is set
String type values will be appropriately escaped, they can be single values or lists of values.
"""
table = _quotedList( [tableName] )
if not table:
error = 'Invalid tableName argument'
self.log.warn( 'deleteEntries:', error )
return S_ERROR( error )
self.log.verbose( 'deleteEntries:', 'deleting rows from table %s.' % table )
try:
condition = self.buildCondition( condDict = condDict, older = older, newer = newer,
timeStamp = timeStamp, orderAttribute = orderAttribute, limit = limit,
                                       greater = greater, smaller = smaller )
except Exception, x:
return S_ERROR( x )
return self._update( 'DELETE FROM %s %s' % ( table, condition ), conn, debug = True )
#############################################################################
def updateFields( self, tableName, updateFields = None, updateValues = None,
condDict = None,
limit = False, conn = None,
updateDict = None,
older = None, newer = None,
timeStamp = None, orderAttribute = None,
greater = None, smaller = None ):
"""
Update "updateFields" from "tableName" with "updateValues".
    updateDict is an alternative way to provide the updateFields and updateValues
N records can match the condition
return S_OK( number of updated rows )
if limit is not False, the given limit is set
String type values will be appropriately escaped.
"""
if not updateFields and not updateDict:
return S_OK( 0 )
table = _quotedList( [tableName] )
if not table:
error = 'Invalid tableName argument'
self.log.warn( 'updateFields:', error )
return S_ERROR( error )
retDict = _checkFields( updateFields, updateValues )
if not retDict['OK']:
error = 'Mismatch between updateFields and updateValues.'
self.log.warn( 'updateFields:', error )
return S_ERROR( error )
if updateFields == None:
updateFields = []
updateValues = []
if updateDict:
if type( updateDict ) != DictType:
        error = 'updateDict must be of type DictType'
self.log.warn( 'updateFields:', error )
return S_ERROR( error )
try:
updateFields += updateDict.keys()
updateValues += [updateDict[k] for k in updateDict.keys()]
except TypeError:
        error = 'updateFields and updateValues must be lists'
self.log.warn( 'updateFields:', error )
return S_ERROR( error )
updateValues = self._escapeValues( updateValues )
if not updateValues['OK']:
self.log.warn( 'updateFields:', updateValues['Message'] )
return updateValues
updateValues = updateValues['Value']
self.log.verbose( 'updateFields:', 'updating fields %s from table %s.' %
( ', '.join( updateFields ), table ) )
try:
condition = self.buildCondition( condDict = condDict, older = older, newer = newer,
timeStamp = timeStamp, orderAttribute = orderAttribute, limit = limit,
                                       greater = greater, smaller = smaller )
except Exception, x:
return S_ERROR( x )
updateString = ','.join( ['%s = %s' % ( _quotedList( [updateFields[k]] ),
updateValues[k] ) for k in range( len( updateFields ) ) ] )
return self._update( 'UPDATE %s SET %s %s' %
( table, updateString, condition ), conn, debug = True )
#############################################################################
def insertFields( self, tableName, inFields = None, inValues = None, conn = None, inDict = None ):
"""
Insert a new row in "tableName" assigning the values "inValues" to the
fields "inFields".
String type values will be appropriately escaped.
"""
table = _quotedList( [tableName] )
if not table:
error = 'Invalid tableName argument'
self.log.warn( 'insertFields:', error )
return S_ERROR( error )
retDict = _checkFields( inFields, inValues )
if not retDict['OK']:
self.log.warn( 'insertFields:', retDict['Message'] )
return retDict
if inFields == None:
inFields = []
inValues = []
if inDict:
if type( inDict ) != DictType:
        error = 'inDict must be of type DictType'
self.log.warn( 'insertFields:', error )
return S_ERROR( error )
try:
inFields += inDict.keys()
inValues += [inDict[k] for k in inDict.keys()]
except TypeError:
        error = 'inFields and inValues must be lists'
self.log.warn( 'insertFields:', error )
return S_ERROR( error )
inFieldString = _quotedList( inFields )
if inFieldString == None:
error = 'Invalid inFields arguments'
self.log.warn( 'insertFields:', error )
return S_ERROR( error )
inFieldString = '( %s )' % inFieldString
retDict = self._escapeValues( inValues )
if not retDict['OK']:
self.log.warn( 'insertFields:', retDict['Message'] )
return retDict
inValueString = ', '.join( retDict['Value'] )
inValueString = '( %s )' % inValueString
self.log.verbose( 'insertFields:', 'inserting %s into table %s'
% ( inFieldString, table ) )
return self._update( 'INSERT INTO %s %s VALUES %s' %
( table, inFieldString, inValueString ), conn, debug = True )
#####################################################################################
#
# This is a test code for this class, it requires access to a MySQL DB
#
if __name__ == '__main__':
import os
import sys
from DIRAC.Core.Utilities import Time
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
if 'PYTHONOPTIMIZE' in os.environ and os.environ['PYTHONOPTIMIZE']:
gLogger.info( 'Unset python optimization "PYTHONOPTIMIZE"' )
sys.exit( 0 )
gLogger.info( 'Testing MySQL class...' )
HOST = '127.0.0.1'
USER = 'Dirac'
PWD = 'Dirac'
DB = 'AccountingDB'
TESTDB = MySQL( HOST, USER, PWD, DB )
assert TESTDB._connect()['OK']
TESTDICT = { 'TestTable' : { 'Fields': { 'ID' : "INTEGER UNIQUE NOT NULL AUTO_INCREMENT",
'Name' : "VARCHAR(256) NOT NULL DEFAULT 'Yo'",
'Surname' : "VARCHAR(256) NOT NULL DEFAULT 'Tu'",
'Count' : "INTEGER NOT NULL DEFAULT 0",
'Time' : "DATETIME",
},
'PrimaryKey': 'ID'
}
}
NAME = 'TestTable'
FIELDS = [ 'Name', 'Surname' ]
NEWVALUES = [ 'Name2', 'Surn2' ]
SOMEFIELDS = [ 'Name', 'Surname', 'Count' ]
ALLFIELDS = [ 'ID', 'Name', 'Surname', 'Count', 'Time' ]
ALLVALUES = [ 1, 'Name1', 'Surn1', 1, 'UTC_TIMESTAMP()' ]
ALLDICT = dict( Name = 'Name1', Surname = 'Surn1', Count = 1, Time = 'UTC_TIMESTAMP()' )
COND0 = {}
COND10 = {'Count': range( 10 )}
try:
RESULT = TESTDB._createTables( TESTDICT, force = True )
assert RESULT['OK']
print 'Table Created'
RESULT = TESTDB.getCounters( NAME, FIELDS, COND0 )
assert RESULT['OK']
assert RESULT['Value'] == []
RESULT = TESTDB.getDistinctAttributeValues( NAME, FIELDS[0], COND0 )
assert RESULT['OK']
assert RESULT['Value'] == []
RESULT = TESTDB.getFields( NAME, FIELDS )
assert RESULT['OK']
assert RESULT['Value'] == ()
print 'Inserting'
for J in range( 100 ):
RESULT = TESTDB.insertFields( NAME, SOMEFIELDS, ['Name1', 'Surn1', J] )
assert RESULT['OK']
assert RESULT['Value'] == 1
assert RESULT['lastRowId'] == J + 1
print 'Querying'
RESULT = TESTDB.getCounters( NAME, FIELDS, COND0 )
assert RESULT['OK']
assert RESULT['Value'] == [( {'Surname': 'Surn1', 'Name': 'Name1'}, 100L )]
RESULT = TESTDB.getDistinctAttributeValues( NAME, FIELDS[0], COND0 )
assert RESULT['OK']
assert RESULT['Value'] == ['Name1']
RESULT = TESTDB.getFields( NAME, FIELDS )
assert RESULT['OK']
assert len( RESULT['Value'] ) == 100
RESULT = TESTDB.getFields( NAME, SOMEFIELDS, COND10 )
assert RESULT['OK']
assert len( RESULT['Value'] ) == 10
RESULT = TESTDB.getFields( NAME, limit = 1 )
assert RESULT['OK']
assert len( RESULT['Value'] ) == 1
RESULT = TESTDB.getFields( NAME, ['Count'], orderAttribute = 'Count:DESC', limit = 1 )
assert RESULT['OK']
assert RESULT['Value'] == ( ( 99, ), )
RESULT = TESTDB.getFields( NAME, ['Count'], orderAttribute = 'Count:ASC', limit = 1 )
assert RESULT['OK']
assert RESULT['Value'] == ( ( 0, ), )
RESULT = TESTDB.getCounters( NAME, FIELDS, COND10 )
assert RESULT['OK']
assert RESULT['Value'] == [( {'Surname': 'Surn1', 'Name': 'Name1'}, 10L )]
RESULT = TESTDB._getFields( NAME, FIELDS, COND10.keys(), COND10.values() )
assert RESULT['OK']
assert len( RESULT['Value'] ) == 10
RESULT = TESTDB.updateFields( NAME, FIELDS, NEWVALUES, COND10 )
assert RESULT['OK']
assert RESULT['Value'] == 10
RESULT = TESTDB.updateFields( NAME, FIELDS, NEWVALUES, COND10 )
assert RESULT['OK']
assert RESULT['Value'] == 0
print 'Removing'
RESULT = TESTDB.deleteEntries( NAME, COND10 )
assert RESULT['OK']
assert RESULT['Value'] == 10
RESULT = TESTDB.deleteEntries( NAME )
assert RESULT['OK']
assert RESULT['Value'] == 90
RESULT = TESTDB.getCounters( NAME, FIELDS, COND0 )
assert RESULT['OK']
assert RESULT['Value'] == []
RESULT = TESTDB.insertFields( NAME, inFields = ALLFIELDS, inValues = ALLVALUES )
assert RESULT['OK']
assert RESULT['Value'] == 1
time.sleep( 1 )
RESULT = TESTDB.insertFields( NAME, inDict = ALLDICT )
assert RESULT['OK']
assert RESULT['Value'] == 1
time.sleep( 2 )
RESULT = TESTDB.getFields( NAME, older = 'UTC_TIMESTAMP()', timeStamp = 'Time' )
assert RESULT['OK']
assert len( RESULT['Value'] ) == 2
RESULT = TESTDB.getFields( NAME, newer = 'UTC_TIMESTAMP()', timeStamp = 'Time' )
assert len( RESULT['Value'] ) == 0
RESULT = TESTDB.getFields( NAME, older = Time.toString(), timeStamp = 'Time' )
assert RESULT['OK']
assert len( RESULT['Value'] ) == 2
RESULT = TESTDB.getFields( NAME, newer = Time.dateTime(), timeStamp = 'Time' )
assert RESULT['OK']
assert len( RESULT['Value'] ) == 0
RESULT = TESTDB.deleteEntries( NAME )
assert RESULT['OK']
assert RESULT['Value'] == 2
print 'OK'
except AssertionError:
print 'ERROR ',
if not RESULT['OK']:
print RESULT['Message']
else:
print RESULT
| gpl-3.0 | -1,148,100,160,074,171,400 | 33.767552 | 120 | 0.569072 | false |
racemidev/RegAdminForLinux | python/ctypeslib_extra/c_util/c_obj.py | 1 | 10559 |
import ctypes
import re
import exceptions
import inspect
class CObjException(exceptions.StandardError):
def __init__(self, msg, *args, **kwds):
        exceptions.StandardError.__init__(self, msg, *args, **kwds)
self.msg = msg
return
def __str__(self):
return "%s: %s" % (self, self.msg)
class CObj(object):
"""Helper wrapper to objectify C objects from ctypes.
"""
_DEBUG_LEVEL = 0
def __init__(self, *args, **kwds):
        super(CObj, self).__init__()
if hasattr(self, "_ccu_init"):
self._ccu_init(*args)
return
def _convert(self, arg, typ):
return arg
def _rev_convert(self, arg, typ):
return arg
def _check_arg_types(self, in_args, arg_types):
in_len = len(in_args)
in_typ = len(arg_types)
if in_len > in_typ:
raise CObjException("Too many arguments: expected %s got %s" % (
in_typ, in_len))
elif in_len < in_typ:
raise CObjException("Too few arguments: expected %s got %s" % (
in_typ, in_len))
da_args = []
for i in xrange(0, in_len):
da_args.append(self._convert(in_args[i], arg_types[i]))
return da_args
def c_ob(self):
if type(self._ccu_ob) == type(()):
return self._ccu_ob[0]
else:
return self._ccu_ob
def _set_c_ob(self, ob):
self._ccu_ob = ob
@classmethod
def __make_regexp_list(cls, in_list):
ret_val = []
for pat in in_list:
ret_val.append(re.compile(pat))
return ret_val
@classmethod
def __in_reg_list(cls, sym, re_list):
for pat in re_list:
if pat.match(sym) is not None:
return True
return False
@classmethod
def __get_func_info(cls, sym):
# Get function from module
func = getattr(cls._ccu_module, sym)
if func is None:
return None
# Function must have this attribute even though it may be None
if not hasattr(func, "restype"):
return None
res = getattr(func, "restype")
# Function must have this attribute with at least one argument
if not hasattr(func, "argtypes"):
return None
f_args = getattr(func, "argtypes")
if (len(f_args) < 1
and type(res) is not type(ctypes.POINTER(cls._ccu_ob_class))):
return None
return (func, res, f_args)
_SIMPLE_FUNC = 1
_CONSTRUCTOR_FUNC = 2
_DESTRUCTOR_FUNC = 3
@classmethod
def __add_symbol(cls, match_ob, f_type=_SIMPLE_FUNC):
if cls._DEBUG_LEVEL > 2:
print "Adding"
finfo = cls.__get_func_info(match_ob.group())
if finfo is None:
if cls._DEBUG_LEVEL > 1:
print "finfo is None"
return False
if f_type == cls._CONSTRUCTOR_FUNC:
def _tmp_func_(self, *args):
da_args = self._check_arg_types(args, finfo[2])
return finfo[0](*da_args),
else:
if finfo[2][0] != ctypes.POINTER(cls._ccu_ob_class):
if cls._DEBUG_LEVEL > 0:
print "First argument required to be pointer to class"
return False
def _tmp_func_(self, *args):
da_args = self._check_arg_types(args, finfo[2][1:])
return self._rev_convert(finfo[0](self.c_ob(), *da_args),
finfo[1])
if f_type == cls._SIMPLE_FUNC:
func_name = match_ob.groups()[0]
elif f_type == cls._CONSTRUCTOR_FUNC:
func_name = '_ccu_constructor_' + match_ob.groups()[0]
elif f_type == cls._DESTRUCTOR_FUNC:
func_name = '_ccu_destructor_' + match_ob.groups()[0]
if cls._DEBUG_LEVEL > 0:
print "Setting func %s in %s to %s" % (
func_name, cls, _tmp_func_)
setattr(cls, func_name, _tmp_func_)
return True
@classmethod
def __add_constructor(cls, c_syms):
if hasattr(cls, "_ccu_init"):
if cls._DEBUG_LEVEL > 1:
print "Class already has constructor. Skipping"
return False
if len(c_syms) == 0:
if cls._DEBUG_LEVEL > 1:
print "No constructor funcs found"
return False
elif len(c_syms) == 1:
if cls._DEBUG_LEVEL > 0:
print "Constructor func: %s" % c_syms[0]
finfo = cls.__get_func_info(c_syms[0])
if finfo is None:
return False
def _tmp_ccu_init(self, *args):
da_args = self._check_arg_types(args, finfo[2])
self._set_c_ob(finfo[0](*da_args))
return
setattr(cls, "_ccu_init", _tmp_ccu_init)
return True
else:
if cls._DEBUG_LEVEL > 2:
print "Multiple constructor funcs: %s" % c_syms
raise CObjException(
"No _ccu_init defined and multiple constructor candidates: %s"
% c_syms)
return False
@classmethod
def __add_destructor(cls, d_syms):
if hasattr(cls, "__del__"):
if cls._DEBUG_LEVEL > 1:
print "Class already has destructor. Skipping"
return False
if len(d_syms) == 0:
if cls._DEBUG_LEVEL > 1:
print "No destructor funcs found"
return False
elif len(d_syms) == 1:
if cls._DEBUG_LEVEL > 0:
print "Destructor func: %s" % d_syms[0]
finfo = cls.__get_func_info(d_syms[0])
if finfo is None:
return False
if len(finfo[2]) > 1:
raise CObjException(
"Destructor func needs arguments. Can't wrap.")
if finfo[2][0] != ctypes.POINTER(cls._ccu_ob_class):
raise CObjException(
"Destructor's first arg needs to be a pointer to class")
def _tmp_del(self):
if hasattr(self, '_ccu_ob'):
if cls._DEBUG_LEVEL > 1:
print "Deleted object type: %s" % type(self.c_ob())
finfo[0](self.c_ob())
self._set_c_ob(None)
else:
print "No _ccu_ob in self"
return
setattr(cls, "__del__", _tmp_del)
return True
else:
if cls._DEBUG_LEVEL > 1:
print "Multiple destructor funcs: %s" % d_syms
raise CObjException(
"No __del__ defined and multiple destructor candidates: %s"
% d_syms)
return False
@classmethod
def __find_struct(cls, mod, object_name):
# module must contain a class with name object_name
if not hasattr(mod, object_name):
raise CObjException(
"No %s contained in %s" % (object_name, mod))
# and it can't be None
ob_class = getattr(mod, object_name)
if ob_class is None:
raise CObjException(
"%s in %s is None" % (object_name, mod))
# and it must be a class derived from ctypes.Structure
if not inspect.isclass(ob_class):
raise CObjException(
"%s from %s is not a class" % (object_name, mod))
if not issubclass(ob_class, ctypes.Structure):
raise CObjException(
"%s from %s is not derived from ctypes.Structure" % (
object_name, mod))
return ob_class
@classmethod
def __find_module(cls, module_name, object_name):
try:
mod = __import__(module_name, globals(), locals(), [object_name])
except StandardError, e:
raise CObjException(e)
if mod is None:
raise CObjException("No module %s found" % module_name)
return mod
@classmethod
def initialize_from_module(
cls,
module_name=None,
object_name=None,
include_patterns=[],
exclude_patterns=[],
constructor_patterns=[ "new.*", "create.*", "alloc.*" ],
destructor_patterns=[ "destroy.*", "free.*",
"dealloc.*", "unalloc.*" ]):
if cls.__name__ == "CObj":
raise CObjException("Cannot initialize base CObj class")
cls._ccu_module = cls.__find_module(module_name, object_name)
cls._ccu_ob_class = cls.__find_struct(cls._ccu_module, object_name)
cls._ccu_found_sym = False
syms = dir(cls._ccu_module)
cls._ccu_e_regs = cls.__make_regexp_list(exclude_patterns)
cls._ccu_i_regs = cls.__make_regexp_list(include_patterns)
cls._ccu_c_regs = cls.__make_regexp_list(constructor_patterns)
cls._ccu_d_regs = cls.__make_regexp_list(destructor_patterns)
c_syms = []
d_syms = []
for sym in syms:
if cls._DEBUG_LEVEL > 2:
print "------------------\n%s" % sym
if not cls.__in_reg_list(sym, cls._ccu_e_regs):
for pat in cls._ccu_i_regs:
match_ob = pat.match(sym)
if match_ob is not None:
if cls.__in_reg_list(match_ob.groups()[0],
cls._ccu_c_regs):
c_syms.append(sym)
if cls.__add_symbol(match_ob,
cls._CONSTRUCTOR_FUNC):
cls._ccu_found_sym = True
elif cls.__in_reg_list(match_ob.groups()[0],
cls._ccu_d_regs):
d_syms.append(sym)
if cls.__add_symbol(match_ob,
cls._DESTRUCTOR_FUNC):
cls._ccu_found_sym = True
elif cls.__add_symbol(match_ob):
cls._ccu_found_sym = True
if cls.__add_constructor(c_syms):
cls._ccu_found_sym = True
if cls.__add_destructor(d_syms):
cls._ccu_found_sym = True
if not cls._ccu_found_sym:
cls._ccu_module = None
raise CObjException(
"Could not find any functions in %s" % module_name)
return True
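# Illustrative sketch of how CObj is intended to be used (the module, struct
# and function names below are hypothetical, not part of this package):
#
#   class Context(CObj):
#       pass
#
#   Context.initialize_from_module(
#       module_name='librra',
#       object_name='RRA_Context',
#       include_patterns=[r'rra_context_(.*)'])
#
#   ctx = Context()      # calls the discovered constructor (e.g. rra_context_new)
#   ctx.do_thing(42)     # wraps rra_context_do_thing(ctx.c_ob(), 42)
#   del ctx              # calls the discovered destructor (e.g. rra_context_free)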
| gpl-2.0 | -8,758,843,443,152,185,000 | 30.900302 | 78 | 0.493229 | false |
googleapis/python-texttospeech | samples/snippets/ssml_addresses.py | 1 | 3857 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START tts_ssml_address_imports]
import html
from google.cloud import texttospeech
# [END tts_ssml_address_imports]
# [START tts_ssml_address_audio]
def ssml_to_audio(ssml_text, outfile):
    # Synthesizes audio from SSML text.
#
# Given a string of SSML text and an output file name, this function
# calls the Text-to-Speech API. The API returns a synthetic audio
# version of the text, formatted according to the SSML commands. This
# function saves the synthetic audio to the designated output file.
#
# Args:
# ssml_text: string of SSML text
# outfile: string name of file under which to save audio output
#
# Returns:
# nothing
# Instantiates a client
client = texttospeech.TextToSpeechClient()
# Sets the text input to be synthesized
synthesis_input = texttospeech.SynthesisInput(ssml=ssml_text)
# Builds the voice request, selects the language code ("en-US") and
# the SSML voice gender ("MALE")
voice = texttospeech.VoiceSelectionParams(
language_code="en-US", ssml_gender=texttospeech.SsmlVoiceGender.MALE
)
# Selects the type of audio file to return
audio_config = texttospeech.AudioConfig(
audio_encoding=texttospeech.AudioEncoding.MP3
)
# Performs the text-to-speech request on the text input with the selected
# voice parameters and audio file type
response = client.synthesize_speech(
input=synthesis_input, voice=voice, audio_config=audio_config
)
# Writes the synthetic audio to the output file.
with open(outfile, "wb") as out:
out.write(response.audio_content)
print("Audio content written to file " + outfile)
# [END tts_ssml_address_audio]
# [START tts_ssml_address_ssml]
def text_to_ssml(inputfile):
# Generates SSML text from plaintext.
# Given an input filename, this function converts the contents of the text
# file into a string of formatted SSML text. This function formats the SSML
# string so that, when synthesized, the synthetic audio will pause for two
# seconds between each line of the text file. This function also handles
# special text characters which might interfere with SSML commands.
#
# Args:
# inputfile: string name of plaintext file
#
# Returns:
# A string of SSML text based on plaintext input
# Parses lines of input file
with open(inputfile, "r") as f:
raw_lines = f.read()
# Replace special characters with HTML Ampersand Character Codes
# These Codes prevent the API from confusing text with
# SSML commands
# For example, '<' --> '<' and '&' --> '&'
escaped_lines = html.escape(raw_lines)
# Convert plaintext to SSML
# Wait two seconds between each address
ssml = "<speak>{}</speak>".format(
escaped_lines.replace("\n", '\n<break time="2s"/>')
)
# Return the concatenated string of ssml script
return ssml
# [END tts_ssml_address_ssml]
# [START tts_ssml_address_test]
def main():
# test example address file
plaintext = "resources/example.txt"
ssml_text = text_to_ssml(plaintext)
ssml_to_audio(ssml_text, "resources/example.mp3")
# [END tts_ssml_address_test]
if __name__ == "__main__":
main()
| apache-2.0 | -5,946,108,898,816,729,000 | 31.411765 | 79 | 0.692766 | false |
Tesora-Release/tesora-trove | trove/taskmanager/manager.py | 1 | 24762 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_service import periodic_task
from oslo_utils import importutils
from trove.backup.models import Backup
import trove.common.cfg as cfg
from trove.common.context import TroveContext
from trove.common import exception
from trove.common.exception import ReplicationSlaveAttachError
from trove.common.exception import TroveError
from trove.common.i18n import _
from trove.common.notification import DBaaSQuotas, EndNotification
from trove.common import remote
import trove.common.rpc.version as rpc_version
from trove.common import server_group as srv_grp
from trove.common.strategies.cluster import strategy
from trove.datastore.models import DatastoreVersion
import trove.extensions.mgmt.instances.models as mgmtmodels
from trove.instance.tasks import InstanceTasks
from trove.taskmanager import models
from trove.taskmanager.models import FreshInstanceTasks, BuiltInstanceTasks
from trove.quota.quota import QUOTAS
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class Manager(periodic_task.PeriodicTasks):
target = messaging.Target(version=rpc_version.RPC_API_VERSION)
def __init__(self):
super(Manager, self).__init__(CONF)
self.admin_context = TroveContext(
user=CONF.nova_proxy_admin_user,
auth_token=CONF.nova_proxy_admin_pass,
tenant=CONF.nova_proxy_admin_tenant_id)
if CONF.exists_notification_transformer:
self.exists_transformer = importutils.import_object(
CONF.exists_notification_transformer,
context=self.admin_context)
def resize_volume(self, context, instance_id, new_size):
with EndNotification(context):
instance_tasks = models.BuiltInstanceTasks.load(context,
instance_id)
instance_tasks.resize_volume(new_size)
def resize_flavor(self, context, instance_id, old_flavor, new_flavor):
with EndNotification(context):
instance_tasks = models.BuiltInstanceTasks.load(context,
instance_id)
instance_tasks.resize_flavor(old_flavor, new_flavor)
def reboot(self, context, instance_id):
with EndNotification(context):
instance_tasks = models.BuiltInstanceTasks.load(context,
instance_id)
instance_tasks.reboot()
def restart(self, context, instance_id):
with EndNotification(context):
instance_tasks = models.BuiltInstanceTasks.load(context,
instance_id)
instance_tasks.restart()
def detach_replica(self, context, instance_id):
with EndNotification(context):
slave = models.BuiltInstanceTasks.load(context, instance_id)
master_id = slave.slave_of_id
master = models.BuiltInstanceTasks.load(context, master_id)
slave.detach_replica(master)
if master.post_processing_required_for_replication():
slave_instances = [BuiltInstanceTasks.load(
context, slave_model.id) for slave_model in master.slaves]
slave_detail = [slave_instance.get_replication_detail()
for slave_instance in slave_instances]
master.complete_master_setup(slave_detail)
def _set_task_status(self, instances, status):
for instance in instances:
setattr(instance.db_info, 'task_status', status)
instance.db_info.save()
def promote_to_replica_source(self, context, instance_id):
def _promote_to_replica_source(old_master, master_candidate,
replica_models):
# First, we transition from the old master to new as quickly as
# possible to minimize the scope of unrecoverable error
old_master.make_read_only(True)
master_ips = old_master.detach_public_ips()
slave_ips = master_candidate.detach_public_ips()
latest_txn_id = old_master.get_latest_txn_id()
master_candidate.wait_for_txn(latest_txn_id)
master_candidate.detach_replica(old_master, for_failover=True)
master_candidate.enable_as_master()
old_master.attach_replica(master_candidate)
master_candidate.attach_public_ips(master_ips)
master_candidate.make_read_only(False)
old_master.attach_public_ips(slave_ips)
# At this point, should something go wrong, there
# should be a working master with some number of working slaves,
# and possibly some number of "orphaned" slaves
exception_replicas = []
for replica in replica_models:
try:
if replica.id != master_candidate.id:
replica.detach_replica(old_master, for_failover=True)
replica.attach_replica(master_candidate)
except exception.TroveError:
msg = _("promote-to-replica-source: Unable to migrate "
"replica %(slave)s from old replica source "
"%(old_master)s to new source %(new_master)s.")
msg_values = {
"slave": replica.id,
"old_master": old_master.id,
"new_master": master_candidate.id
}
LOG.exception(msg % msg_values)
exception_replicas.append(replica)
try:
old_master.demote_replication_master()
except Exception:
LOG.exception(_("Exception demoting old replica source"))
exception_replicas.append(old_master)
if master_candidate.post_processing_required_for_replication():
new_slaves = list(replica_models)
new_slaves.remove(master_candidate)
new_slaves.append(old_master)
new_slaves_detail = [slave.get_replication_detail()
for slave in new_slaves]
master_candidate.complete_master_setup(new_slaves_detail)
self._set_task_status([old_master] + replica_models,
InstanceTasks.NONE)
if exception_replicas:
self._set_task_status(exception_replicas,
InstanceTasks.PROMOTION_ERROR)
msg = _("promote-to-replica-source %(id)s: The following "
"replicas may not have been switched: %(replicas)s")
msg_values = {
"id": master_candidate.id,
"replicas": exception_replicas
}
raise ReplicationSlaveAttachError(msg % msg_values)
with EndNotification(context):
master_candidate = BuiltInstanceTasks.load(context, instance_id)
old_master = BuiltInstanceTasks.load(context,
master_candidate.slave_of_id)
replicas = []
for replica_dbinfo in old_master.slaves:
if replica_dbinfo.id == instance_id:
replica = master_candidate
else:
replica = BuiltInstanceTasks.load(context,
replica_dbinfo.id)
replicas.append(replica)
try:
_promote_to_replica_source(old_master, master_candidate,
replicas)
except ReplicationSlaveAttachError:
raise
except Exception:
self._set_task_status([old_master] + replicas,
InstanceTasks.PROMOTION_ERROR)
raise
# pulled out to facilitate testing
def _get_replica_txns(self, replica_models):
return [[repl] + repl.get_last_txn() for repl in replica_models]
def _most_current_replica(self, old_master, replica_models):
last_txns = self._get_replica_txns(replica_models)
master_ids = [txn[1] for txn in last_txns if txn[1]]
        if len(set(master_ids)) > 1:
raise TroveError(_("Replicas of %s not all replicating"
" from same master") % old_master.id)
return sorted(last_txns, key=lambda x: x[2], reverse=True)[0][0]
def eject_replica_source(self, context, instance_id):
def _eject_replica_source(old_master, replica_models):
master_candidate = self._most_current_replica(old_master,
replica_models)
master_ips = old_master.detach_public_ips()
slave_ips = master_candidate.detach_public_ips()
master_candidate.detach_replica(old_master, for_failover=True)
master_candidate.enable_as_master()
master_candidate.attach_public_ips(master_ips)
master_candidate.make_read_only(False)
old_master.attach_public_ips(slave_ips)
exception_replicas = []
for replica in replica_models:
try:
if replica.id != master_candidate.id:
replica.detach_replica(old_master, for_failover=True)
replica.attach_replica(master_candidate)
except exception.TroveError:
msg = _("eject-replica-source: Unable to migrate "
"replica %(slave)s from old replica source "
"%(old_master)s to new source %(new_master)s.")
msg_values = {
"slave": replica.id,
"old_master": old_master.id,
"new_master": master_candidate.id
}
LOG.exception(msg % msg_values)
exception_replicas.append(replica.id)
if master_candidate.post_processing_required_for_replication():
new_slaves = list(replica_models)
new_slaves.remove(master_candidate)
new_slaves_detail = [slave.get_replication_detail()
for slave in new_slaves]
master_candidate.complete_master_setup(new_slaves_detail)
self._set_task_status([old_master] + replica_models,
InstanceTasks.NONE)
if exception_replicas:
self._set_task_status(exception_replicas,
InstanceTasks.EJECTION_ERROR)
msg = _("eject-replica-source %(id)s: The following "
"replicas may not have been switched: %(replicas)s")
msg_values = {
"id": master_candidate.id,
"replicas": exception_replicas
}
raise ReplicationSlaveAttachError(msg % msg_values)
with EndNotification(context):
master = BuiltInstanceTasks.load(context, instance_id)
replicas = [BuiltInstanceTasks.load(context, dbinfo.id)
for dbinfo in master.slaves]
try:
_eject_replica_source(master, replicas)
except ReplicationSlaveAttachError:
raise
except Exception:
self._set_task_status([master] + replicas,
InstanceTasks.EJECTION_ERROR)
raise
def migrate(self, context, instance_id, host):
with EndNotification(context):
instance_tasks = models.BuiltInstanceTasks.load(context,
instance_id)
instance_tasks.migrate(host)
def delete_instance(self, context, instance_id):
with EndNotification(context):
try:
instance_tasks = models.BuiltInstanceTasks.load(context,
instance_id)
instance_tasks.delete_async()
except exception.UnprocessableEntity:
instance_tasks = models.FreshInstanceTasks.load(context,
instance_id)
instance_tasks.delete_async()
def delete_backup(self, context, backup_id):
with EndNotification(context):
models.BackupTasks.delete_backup(context, backup_id)
def create_backup(self, context, backup_info, instance_id):
with EndNotification(context, backup_id=backup_info['id']):
instance_tasks = models.BuiltInstanceTasks.load(context,
instance_id)
instance_tasks.create_backup(backup_info)
def _create_replication_slave(self, context, instance_id, name, flavor,
image_id, databases, users,
datastore_manager, packages, volume_size,
availability_zone, root_password, nics,
overrides, slave_of_id, backup_id,
volume_type, modules):
if type(instance_id) in [list]:
ids = instance_id
root_passwords = root_password
else:
ids = [instance_id]
root_passwords = [root_password]
replica_number = 0
replica_backup_id = backup_id
replica_backup_created = False
replicas = []
master_instance_tasks = BuiltInstanceTasks.load(context, slave_of_id)
server_group = master_instance_tasks.server_group
scheduler_hints = srv_grp.ServerGroup.convert_to_hint(server_group)
LOG.debug("Using scheduler hints for locality: %s" % scheduler_hints)
try:
for replica_index in range(0, len(ids)):
try:
replica_number += 1
LOG.debug("Creating replica %d of %d."
% (replica_number, len(ids)))
instance_tasks = FreshInstanceTasks.load(
context, ids[replica_index])
snapshot = instance_tasks.get_replication_master_snapshot(
context, slave_of_id, flavor, replica_backup_id,
replica_number=replica_number)
replica_backup_id = snapshot['dataset']['snapshot_id']
replica_backup_created = (replica_backup_id is not None)
instance_tasks.create_instance(
flavor, image_id, databases, users, datastore_manager,
packages, volume_size, replica_backup_id,
availability_zone, root_passwords[replica_index],
nics, overrides, None, snapshot, volume_type,
modules, scheduler_hints)
replicas.append(instance_tasks)
except Exception:
# if it's the first replica, then we shouldn't continue
LOG.exception(_(
"Could not create replica %(num)d of %(count)d.")
% {'num': replica_number, 'count': len(ids)})
if replica_number == 1:
raise
for replica in replicas:
replica.wait_for_instance(CONF.restore_usage_timeout, flavor)
# Some datastores requires completing configuration of replication
# nodes with information that is only available after all the
# instances has been started.
if (master_instance_tasks
.post_processing_required_for_replication()):
slave_instances = [BuiltInstanceTasks.load(context, slave.id)
for slave in master_instance_tasks.slaves]
# Collect info from each slave post instance launch
slave_detail = [slave_instance.get_replication_detail()
for slave_instance in slave_instances]
# Pass info of all replication nodes to the master for
# replication setup completion
master_detail = master_instance_tasks.get_replication_detail()
master_instance_tasks.complete_master_setup(slave_detail)
# Pass info of all replication nodes to each slave for
# replication setup completion
for slave_instance in slave_instances:
slave_instance.complete_slave_setup(master_detail,
slave_detail)
# Push pending data/transactions from master to slaves
master_instance_tasks.sync_data_to_slaves()
# Set the status of all slave nodes to ACTIVE
for slave_instance in slave_instances:
slave_guest = remote.create_guest_client(
slave_instance.context, slave_instance.db_info.id,
slave_instance.datastore_version.manager)
slave_guest.cluster_complete()
finally:
if replica_backup_created:
Backup.delete(context, replica_backup_id)
def _create_instance(self, context, instance_id, name, flavor,
image_id, databases, users, datastore_manager,
packages, volume_size, backup_id, availability_zone,
root_password, nics, overrides, slave_of_id,
cluster_config, volume_type, modules, locality):
if slave_of_id:
self._create_replication_slave(context, instance_id, name,
flavor, image_id, databases, users,
datastore_manager, packages,
volume_size,
availability_zone, root_password,
nics, overrides, slave_of_id,
backup_id, volume_type, modules)
else:
if type(instance_id) in [list]:
raise AttributeError(_(
"Cannot create multiple non-replica instances."))
instance_tasks = FreshInstanceTasks.load(context, instance_id)
scheduler_hints = srv_grp.ServerGroup.build_scheduler_hint(
context, locality, instance_id)
instance_tasks.create_instance(flavor, image_id, databases, users,
datastore_manager, packages,
volume_size, backup_id,
availability_zone, root_password,
nics, overrides, cluster_config,
None, volume_type, modules,
scheduler_hints)
timeout = (CONF.restore_usage_timeout if backup_id
else CONF.usage_timeout)
instance_tasks.wait_for_instance(timeout, flavor)
def create_instance(self, context, instance_id, name, flavor,
image_id, databases, users, datastore_manager,
packages, volume_size, backup_id, availability_zone,
root_password, nics, overrides, slave_of_id,
cluster_config, volume_type, modules, locality):
with EndNotification(context,
instance_id=(instance_id[0]
if type(instance_id) is list
else instance_id)):
self._create_instance(context, instance_id, name, flavor,
image_id, databases, users,
datastore_manager, packages, volume_size,
backup_id, availability_zone,
root_password, nics, overrides, slave_of_id,
cluster_config, volume_type, modules,
locality)
def upgrade(self, context, instance_id, datastore_version_id):
instance_tasks = models.BuiltInstanceTasks.load(context, instance_id)
datastore_version = DatastoreVersion.load_by_uuid(datastore_version_id)
with EndNotification(context):
instance_tasks.upgrade(datastore_version)
def update_overrides(self, context, instance_id, overrides):
instance_tasks = models.BuiltInstanceTasks.load(context, instance_id)
instance_tasks.update_overrides(overrides)
def unassign_configuration(self, context, instance_id, flavor,
configuration_id):
instance_tasks = models.BuiltInstanceTasks.load(context, instance_id)
instance_tasks.unassign_configuration(flavor, configuration_id)
def create_cluster(self, context, cluster_id):
with EndNotification(context, cluster_id=cluster_id):
cluster_tasks = models.load_cluster_tasks(context, cluster_id)
cluster_tasks.create_cluster(context, cluster_id)
def grow_cluster(self, context, cluster_id, new_instance_ids):
cluster_tasks = models.load_cluster_tasks(context, cluster_id)
cluster_tasks.grow_cluster(context, cluster_id, new_instance_ids)
def shrink_cluster(self, context, cluster_id, instance_ids):
cluster_tasks = models.load_cluster_tasks(context, cluster_id)
cluster_tasks.shrink_cluster(context, cluster_id, instance_ids)
def delete_cluster(self, context, cluster_id):
with EndNotification(context):
cluster_tasks = models.load_cluster_tasks(context, cluster_id)
cluster_tasks.delete_cluster(context, cluster_id)
if CONF.exists_notification_transformer:
@periodic_task.periodic_task
def publish_exists_event(self, context):
"""
Push this in Instance Tasks to fetch a report/collection
:param context: currently None as specied in bin script
"""
mgmtmodels.publish_exist_events(self.exists_transformer,
self.admin_context)
if CONF.quota_notification_interval:
@periodic_task.periodic_task(spacing=CONF.quota_notification_interval)
def publish_quota_notifications(self, context):
nova_client = remote.create_nova_client(self.admin_context)
for tenant in nova_client.tenants.list():
for quota in QUOTAS.get_all_quotas_by_tenant(tenant.id):
usage = QUOTAS.get_quota_usage(quota)
DBaaSQuotas(self.admin_context, quota, usage).notify()
def __getattr__(self, name):
"""
We should only get here if Python couldn't find a "real" method.
"""
def raise_error(msg):
raise AttributeError(msg)
manager, sep, method = name.partition('_')
if not manager:
raise_error('Cannot derive manager from attribute name "%s"' %
name)
task_strategy = strategy.load_taskmanager_strategy(manager)
if not task_strategy:
raise_error('No task manager strategy for manager "%s"' % manager)
if method not in task_strategy.task_manager_manager_actions:
raise_error('No method "%s" for task manager strategy for manager'
' "%s"' % (method, manager))
return task_strategy.task_manager_manager_actions.get(method)
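    # Illustrative example of the dynamic dispatch above (a sketch; the exact
    # method names depend on each datastore's cluster strategy). A call such as
    #
    #   manager.mongodb_add_shard_cluster(context, cluster_id, ...)
    #
    # is split into manager='mongodb' and method='add_shard_cluster', and the
    # callable is then looked up in that strategy's task_manager_manager_actions.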
| apache-2.0 | -7,253,688,209,542,132,000 | 47.457926 | 79 | 0.560577 | false |
jbalogh/zamboni | apps/devhub/helpers.py | 1 | 6396 | from collections import defaultdict
import urllib
import chardet
import jinja2
from jingo import register
from jingo.helpers import datetime
from tower import ugettext as _, ungettext as ngettext
import amo
from amo.urlresolvers import reverse
from amo.helpers import breadcrumbs, page_title
from access import acl
from addons.helpers import new_context
register.function(acl.check_addon_ownership)
@register.inclusion_tag('devhub/addons/listing/items.html')
@jinja2.contextfunction
def dev_addon_listing_items(context, addons, src=None, notes={}):
return new_context(**locals())
@register.function
@jinja2.contextfunction
def dev_page_title(context, title=None, addon=None):
"""Wrapper for devhub page titles."""
if addon:
title = u'%s :: %s' % (title, addon.name)
else:
devhub = _('Developer Hub')
title = '%s :: %s' % (title, devhub) if title else devhub
return page_title(context, title)
@register.function
@jinja2.contextfunction
def docs_page_title(context, title=None):
"""Wrapper for docs page titles."""
devhub = _('Add-on Documentation :: Developer Hub')
title = '%s :: %s' % (title, devhub) if title else devhub
return page_title(context, title)
@register.function
@jinja2.contextfunction
def dev_breadcrumbs(context, addon=None, items=None, add_default=False):
"""
Wrapper function for ``breadcrumbs``. Prepends 'Developer Hub'
breadcrumbs.
**items**
list of [(url, label)] to be inserted after Add-on.
**addon**
Adds the Add-on name to the end of the trail. If items are
specified then the Add-on will be linked.
**add_default**
Prepends trail back to home when True. Default is False.
"""
crumbs = [(reverse('devhub.index'), _('Developer Hub'))]
if not addon and not items:
# We are at the end of the crumb trail.
crumbs.append((None, _('My Add-ons')))
else:
crumbs.append((reverse('devhub.addons'), _('My Add-ons')))
if addon:
if items:
url = reverse('devhub.addons.edit', args=[addon.slug])
else:
# The Addon is the end of the trail.
url = None
crumbs.append((url, addon.name))
if items:
crumbs.extend(items)
return breadcrumbs(context, crumbs, add_default)
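# Hypothetical template usage of dev_breadcrumbs (illustrative only; not taken
# from an actual template in this repository):
#
#   {{ dev_breadcrumbs(addon, items=[(None, _('Status'))]) }}
#
# renders a trail like: Developer Hub / My Add-ons / <addon name> / Status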
@register.function
@jinja2.contextfunction
def docs_breadcrumbs(context, items=None):
"""
Wrapper function for `breadcrumbs` for devhub docs.
"""
crumbs = [(reverse('devhub.index'), _('Developer Hub')),
(None, _('Developer Docs'))]
if items:
crumbs.extend(items)
return breadcrumbs(context, crumbs, True)
@register.inclusion_tag('devhub/versions/add_file_modal.html')
@jinja2.contextfunction
def add_file_modal(context, title, action, upload_url, action_label):
return new_context(modal_type='file', context=context, title=title,
action=action, upload_url=upload_url,
action_label=action_label)
@register.inclusion_tag('devhub/versions/add_file_modal.html')
@jinja2.contextfunction
def add_version_modal(context, title, action, upload_url, action_label):
return new_context(modal_type='version', context=context, title=title,
action=action, upload_url=upload_url,
action_label=action_label)
@register.function
def status_choices(addon):
"""Return a dict like STATUS_CHOICES customized for the addon status."""
# Show "awaiting full review" for unreviewed files on that track.
choices = dict(amo.STATUS_CHOICES)
if addon.status in (amo.STATUS_NOMINATED, amo.STATUS_LITE_AND_NOMINATED,
amo.STATUS_PUBLIC):
choices[amo.STATUS_UNREVIEWED] = choices[amo.STATUS_NOMINATED]
return choices
@register.inclusion_tag('devhub/versions/file_status_message.html')
def file_status_message(file, addon, file_history=False):
choices = status_choices(addon)
return {'fileid': file.id, 'platform': file.amo_platform.name,
'created': datetime(file.created),
'status': choices[file.status],
'file_history': file_history,
'actions': amo.LOG_REVIEW_EMAIL_USER,
'status_date': datetime(file.datestatuschanged)}
@register.function
def dev_files_status(files, addon):
"""Group files by their status (and files per status)."""
status_count = defaultdict(int)
choices = status_choices(addon)
for file in files:
status_count[file.status] += 1
return [(count, unicode(choices[status])) for
(status, count) in status_count.items()]
@register.function
def status_class(addon):
classes = {
amo.STATUS_NULL: 'incomplete',
amo.STATUS_UNREVIEWED: 'unreviewed',
amo.STATUS_NOMINATED: 'nominated',
amo.STATUS_PUBLIC: 'fully-approved',
amo.STATUS_DISABLED: 'admin-disabled',
amo.STATUS_LITE: 'lite',
amo.STATUS_LITE_AND_NOMINATED: 'lite-nom',
amo.STATUS_PURGATORY: 'purgatory',
}
if addon.disabled_by_user and addon.status != amo.STATUS_DISABLED:
cls = 'disabled'
else:
cls = classes.get(addon.status, 'none')
return 'status-' + cls
@register.function
def log_action_class(action_id):
if action_id in amo.LOG_BY_ID:
cls = amo.LOG_BY_ID[action_id].action_class
if cls is not None:
return 'action-' + cls
@register.function
def summarize_validation(validation):
"""Readable summary of add-on validation results."""
# L10n: first parameter is the number of errors
errors = ngettext('{0} error', '{0} errors',
validation.errors).format(validation.errors)
# L10n: first parameter is the number of warnings
warnings = ngettext('{0} warning', '{0} warnings',
validation.warnings).format(validation.warnings)
return "%s, %s" % (errors, warnings)
@register.filter
def display_url(url):
"""Display a URL like the browser URL bar would.
Note: returns a Unicode object, not a valid URL.
"""
if isinstance(url, unicode):
# Byte sequences will be url encoded so convert
# to bytes here just to stop auto decoding.
url = url.encode('utf8')
bytes = urllib.unquote(url)
c = chardet.detect(bytes)
return bytes.decode(c['encoding'], 'replace')
| bsd-3-clause | 5,686,896,041,450,829,000 | 31.467005 | 76 | 0.653377 | false |
tensorflow/models | official/modeling/optimization/lr_schedule_test.py | 1 | 3951 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for lr_schedule."""
from absl.testing import parameterized
import tensorflow as tf
from official.modeling.optimization import lr_schedule
class PowerAndLinearDecayTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
dict(
testcase_name='power_only',
init_lr=1.0,
power=-1.0,
linear_decay_fraction=0.0,
total_decay_steps=100,
offset=0,
expected=[[0, 1.0], [1, 1.0], [40, 1. / 40.], [60, 1. / 60],
[100, 1. / 100]]),
dict(
testcase_name='linear_only',
init_lr=1.0,
power=0.0,
linear_decay_fraction=1.0,
total_decay_steps=100,
offset=0,
expected=[[0, 1.0], [1, 0.99], [40, 0.6], [60, 0.4], [100, 0.0]]),
dict(
testcase_name='general',
init_lr=1.0,
power=-1.0,
linear_decay_fraction=0.5,
total_decay_steps=100,
offset=0,
expected=[[0, 1.0], [1, 1.0], [40, 1. / 40.],
[60, 1. / 60. * 0.8], [100, 0.0]]),
dict(
testcase_name='offset',
init_lr=1.0,
power=-1.0,
linear_decay_fraction=0.5,
total_decay_steps=100,
offset=90,
expected=[[0, 1.0], [90, 1.0], [91, 1.0], [130, 1. / 40.],
[150, 1. / 60. * 0.8], [190, 0.0], [200, 0.0]]),
)
def test_power_linear_lr_schedule(self, init_lr, power, linear_decay_fraction,
total_decay_steps, offset, expected):
lr = lr_schedule.PowerAndLinearDecay(
initial_learning_rate=init_lr,
power=power,
linear_decay_fraction=linear_decay_fraction,
total_decay_steps=total_decay_steps,
offset=offset)
for step, value in expected:
self.assertAlmostEqual(lr(step).numpy(), value)
class OffsetLearningRateTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
dict(class_name=lr_schedule.PiecewiseConstantDecayWithOffset),
dict(class_name=lr_schedule.PolynomialDecayWithOffset),
dict(class_name=lr_schedule.ExponentialDecayWithOffset),
dict(class_name=lr_schedule.CosineDecayWithOffset),
)
def test_generated_docstring(self, class_name):
self.assertNotEmpty(class_name.__init__.__doc__)
@parameterized.parameters(
dict(
class_name=lr_schedule.PiecewiseConstantDecayWithOffset,
kwarg=dict(boundaries=[50, 80], values=[1.0, 0.5, 0.1])),
dict(
class_name=lr_schedule.PolynomialDecayWithOffset,
kwarg=dict(initial_learning_rate=1.0, decay_steps=100)),
dict(
class_name=lr_schedule.ExponentialDecayWithOffset,
kwarg=dict(
initial_learning_rate=1.0, decay_steps=100, decay_rate=0.5)),
dict(
class_name=lr_schedule.CosineDecayWithOffset,
kwarg=dict(initial_learning_rate=1.0, decay_steps=100)),
)
def test_offset(self, class_name, kwarg):
offset = 10
offset_lr = class_name(offset=offset, **kwarg)
base_lr = class_name.base_lr_class(**kwarg)
self.assertIsInstance(offset_lr, class_name)
for step in range(10, 101, 10):
self.assertEqual(offset_lr(step), base_lr(step - offset))
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 126,090,165,556,039,900 | 35.247706 | 80 | 0.61225 | false |
quicksloth/source-code-recommendation-server | src/server.py | 1 | 3442 | import os
import flask
import time
from flask import Flask, request, json
from flask_socketio import SocketIO
import requests
# from logging.config import fileConfig
# import logging
from Controllers.EvaluatorController import EvaluatorController
# fileConfig('logging.conf')
# log = logging.getLogger(__name__)
from Modules.Concepts.ComplexNetwork import ComplexNetwork
app = Flask(__name__, static_folder='')
app.config['SECRET_KEY'] = '@server-secret'
# socketio = SocketIO(app, allow_upgrades=True, engineio_logger=log, logger=log)
socketio = SocketIO(app, allow_upgrades=True)
complex_network = ComplexNetwork()
evaluator_controller = EvaluatorController(complex_network=complex_network)
class Socket:
"""
    Class used to emit an answer to a specific client
"""
def __init__(self, sid):
self.sid = sid
self.connected = True
# Emits data to a socket's unique room
def emit(self, event, data):
print('going to emit to', self.sid)
socketio.emit(event, data, room=self.sid, namespace='/code-recommendations')
@app.route('/')
def index():
return 'Hello, World new version!'
@app.route('/source-codes', methods=['POST'])
def source_codes():
start = time.time()
evaluator_controller.evaluate_search_codes(request)
end = time.time()
print('Receive Source code and evaluate took', (end - start), 'seconds')
return json.dumps({'success': True})
def get_source_codes(data):
url = os.environ.get('CRAWLER_HOST', 'http://0.0.0.0:1111/crawl')
headers = {'Content-Type': 'application/json'}
print('going to request new version')
requests.request(url=url, method='GET', data=data, headers=headers)
@app.route('/train-network', methods=['POST'])
def train_network():
print('Train Start')
start = time.time()
evaluator_controller.train_network(train_database=request.get_json().get('train_text'))
end = time.time()
print('TrainNetwork took', (end - start), 'seconds')
return json.dumps({'success': True})
@app.route('/word-complex-network', methods=['GET'])
def get_complex_network():
resp = flask.Response(json.dumps(complex_network.adjacency_list))
resp.headers['Content-Type'] = 'application/json'
return resp
@app.route('/word-complex-network-cluster', methods=['GET'])
def get_complex_network_cluster():
resp = flask.Response(json.dumps(complex_network.cluster_list))
resp.headers['Content-Type'] = 'application/json'
return resp
# TODO: maybe use on connect
@socketio.on('connect')
def connect():
    print('client connected')
@socketio.on('getCodes', namespace='/code-recommendations')
def get_recommendation_codes(data):
data = json.loads(data)
print(data)
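    # Forward the query to the evaluator; request.sid identifies this client's socket so the
    # result can be emitted back to it via emit_code_recommendations().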
evaluator_controller.get_recommendation_code(request_id=request.sid,
language=data['language'],
query=data['query'],
comments=data['comments'],
libs=data['libs'])
def emit_code_recommendations(request_id, data):
Socket(request_id).emit('recommendationCodes', data)
if __name__ == "__main__":
port = int(os.environ.get('PORT', 5000))
    # The port to listen on; the URL must be <hostname>:<port>/ in order to send requests to this program
socketio.run(app, host='0.0.0.0', port=port)
| apache-2.0 | -6,203,669,130,902,017,000 | 30.272727 | 121 | 0.655814 | false |
niteoweb/libcloud | libcloud/dns/types.py | 1 | 4093 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.common.types import LibcloudError
__all__ = [
'Provider',
'RecordType',
'ZoneError',
'ZoneDoesNotExistError',
'ZoneAlreadyExistsError',
'RecordError',
'RecordDoesNotExistError',
'RecordAlreadyExistsError',
'OLD_CONSTANT_TO_NEW_MAPPING'
]
class Provider(object):
"""
Defines for each of the supported providers
Non-Dummy drivers are sorted in alphabetical order. Please preserve this
ordering when adding new drivers.
"""
DUMMY = 'dummy'
AURORADNS = 'auroradns'
BUDDYNS = 'buddyns'
CLOUDFLARE = 'cloudflare'
CLOUDNS = 'cloudns'
DIGITAL_OCEAN = 'digitalocean'
DNSIMPLE = 'dnsimple'
DNSPOD = 'dnspod'
DURABLEDNS = 'durabledns'
GANDI = 'gandi'
GODADDY = 'godaddy'
GOOGLE = 'google'
HOSTVIRTUAL = 'hostvirtual'
HPCLOUD = 'hpcloud'
LEASEWEB = 'leaseweb'
LINODE = 'linode'
LIQUIDWEB = 'liquidweb'
LUADNS = 'luadns'
NFSN = 'nfsn'
NSONE = 'nsone'
POINTDNS = 'pointdns'
POWERDNS = 'powerdns'
RACKSPACE = 'rackspace'
ROUTE53 = 'route53'
SOFTLAYER = 'softlayer'
VULTR = 'vultr'
WORLDWIDEDNS = 'worldwidedns'
ZERIGO = 'zerigo'
ZONOMI = 'zonomi'
# Deprecated
RACKSPACE_US = 'rackspace_us'
RACKSPACE_UK = 'rackspace_uk'
OLD_CONSTANT_TO_NEW_MAPPING = {
Provider.RACKSPACE_US: Provider.RACKSPACE,
Provider.RACKSPACE_UK: Provider.RACKSPACE,
}
class RecordType(object):
"""
DNS record type.
"""
A = 'A'
AAAA = 'AAAA'
    AFSDB = 'AFSDB'
ALIAS = 'ALIAS'
CERT = 'CERT'
CNAME = 'CNAME'
DNAME = 'DNAME'
DNSKEY = 'DNSKEY'
DS = 'DS'
GEO = 'GEO'
HINFO = 'HINFO'
KEY = 'KEY'
LOC = 'LOC'
MX = 'MX'
NAPTR = 'NAPTR'
NS = 'NS'
NSEC = 'NSEC'
OPENPGPKEY = 'OPENPGPKEY'
PTR = 'PTR'
REDIRECT = 'REDIRECT'
RP = 'RP'
RRSIG = 'RRSIG'
SOA = 'SOA'
SPF = 'SPF'
SRV = 'SRV'
SSHFP = 'SSHFP'
TLSA = 'TLSA'
TXT = 'TXT'
URL = 'URL'
WKS = 'WKS'
class ZoneError(LibcloudError):
error_type = 'ZoneError'
kwargs = ('zone_id', )
def __init__(self, value, driver, zone_id):
self.zone_id = zone_id
super(ZoneError, self).__init__(value=value, driver=driver)
def __str__(self):
return self.__repr__()
def __repr__(self):
return ('<%s in %s, zone_id=%s, value=%s>' %
(self.error_type, repr(self.driver),
self.zone_id, self.value))
class ZoneDoesNotExistError(ZoneError):
error_type = 'ZoneDoesNotExistError'
class ZoneAlreadyExistsError(ZoneError):
error_type = 'ZoneAlreadyExistsError'
class RecordError(LibcloudError):
error_type = 'RecordError'
def __init__(self, value, driver, record_id):
self.record_id = record_id
super(RecordError, self).__init__(value=value, driver=driver)
def __str__(self):
return self.__repr__()
def __repr__(self):
return ('<%s in %s, record_id=%s, value=%s>' %
(self.error_type, repr(self.driver),
self.record_id, self.value))
class RecordDoesNotExistError(RecordError):
error_type = 'RecordDoesNotExistError'
class RecordAlreadyExistsError(RecordError):
error_type = 'RecordAlreadyExistsError'
| apache-2.0 | 4,471,243,542,061,305,300 | 24.265432 | 76 | 0.6301 | false |
dmoggles/pymudclient | pymudclient/tests/test_escape_parser.py | 1 | 2090 | from pymudclient.escape_parser import EscapeParser, InvalidInput, InvalidEscape
class TestEscapes(object):
def setUp(self):
self.eparser = EscapeParser()
tests = [('foo\n', #basic.
['foo']),
('\n',
['']),
('foo\\nbar\n', #multiple things on one input.
['foo', 'bar']),
('foo\\\nbar\n',
['foo\\', 'bar']),
('foo;bar\n', #semicolon linebreak this time
['foo', 'bar']),
('foo\\x0Abar\\012', #octal and hex escapes.
['foo', 'bar']),
('\\x57\n',
['\x57']),
('\\100\\10\\1\n',
['\100\10\1']),
('\\\\foo\n', #escaped backslashes.
['\\foo']),
('\\;bar\n', #escaped semicolons
[';bar']),
('\\foo\n', #and unknown escapes.
['\\foo'])]
error_tests = [('\\xoink\n', InvalidEscape),
('bar\\', InvalidInput),
('foo\\xb', InvalidEscape)]
def test_normals(self):
for test, expected in self.tests:
yield self.do_one_normal, test, expected
def do_one_normal(self, input, expected):
res = list(self.eparser.parse(input))
assert res == expected, res
def run_error_tests(self):
for test, err in self.error_tests:
yield self.do_one_error, test, err
def do_one_error(self, input, expected_err):
try:
res = list(self.eparser.parse(input))
except expected_err: #I love Python so much. :)
pass
else:
assert False, res
def test_retain_if_no_newline(self):
res = list(self.eparser.parse('foo'))
assert res == []
res = list(self.eparser.parse('\n'))
assert res == ['foo']
def test_chopped_octal_escape(self):
res = list(self.eparser.parse('foo\\1'))
assert res == []
res = list(self.eparser.parse('\n'))
assert res == ['foo\1']
| gpl-3.0 | 5,013,991,225,037,211,000 | 26.866667 | 79 | 0.466507 | false |
lduarte1991/edx-platform | common/lib/xmodule/xmodule/seq_module.py | 1 | 23961 | """
xModule implementation of a learning sequence
"""
# pylint: disable=abstract-method
import collections
import json
import logging
from datetime import datetime
from lxml import etree
from pkg_resources import resource_string
from pytz import UTC
from xblock.completable import XBlockCompletionMode
from xblock.core import XBlock
from xblock.fields import Boolean, Integer, List, Scope, String
from xblock.fragment import Fragment
from .exceptions import NotFoundError
from .fields import Date
from .mako_module import MakoModuleDescriptor
from .progress import Progress
from .x_module import STUDENT_VIEW, XModule
from .xml_module import XmlDescriptor
log = logging.getLogger(__name__)
try:
import newrelic.agent
except ImportError:
newrelic = None # pylint: disable=invalid-name
# HACK: This shouldn't be hard-coded to two types
# OBSOLETE: This obsoletes 'type'
class_priority = ['video', 'problem']
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
class SequenceFields(object):
has_children = True
completion_mode = XBlockCompletionMode.AGGREGATOR
# NOTE: Position is 1-indexed. This is silly, but there are now student
# positions saved on prod, so it's not easy to fix.
position = Integer(help="Last tab viewed in this sequence", scope=Scope.user_state)
due = Date(
display_name=_("Due Date"),
help=_("Enter the date by which problems are due."),
scope=Scope.settings,
)
hide_after_due = Boolean(
display_name=_("Hide sequence content After Due Date"),
help=_(
"If set, the sequence content is hidden for non-staff users after the due date has passed."
),
default=False,
scope=Scope.settings,
)
is_entrance_exam = Boolean(
display_name=_("Is Entrance Exam"),
help=_(
"Tag this course module as an Entrance Exam. "
"Note, you must enable Entrance Exams for this course setting to take effect."
),
default=False,
scope=Scope.settings,
)
class ProctoringFields(object):
"""
Fields that are specific to Proctored or Timed Exams
"""
is_time_limited = Boolean(
display_name=_("Is Time Limited"),
help=_(
"This setting indicates whether students have a limited time"
" to view or interact with this courseware component."
),
default=False,
scope=Scope.settings,
)
default_time_limit_minutes = Integer(
display_name=_("Time Limit in Minutes"),
help=_(
"The number of minutes available to students for viewing or interacting with this courseware component."
),
default=None,
scope=Scope.settings,
)
is_proctored_enabled = Boolean(
display_name=_("Is Proctoring Enabled"),
help=_(
"This setting indicates whether this exam is a proctored exam."
),
default=False,
scope=Scope.settings,
)
exam_review_rules = String(
display_name=_("Software Secure Review Rules"),
help=_(
"This setting indicates what rules the proctoring team should follow when viewing the videos."
),
default='',
scope=Scope.settings,
)
is_practice_exam = Boolean(
display_name=_("Is Practice Exam"),
help=_(
"This setting indicates whether this exam is for testing purposes only. Practice exams are not verified."
),
default=False,
scope=Scope.settings,
)
def _get_course(self):
"""
Return course by course id.
"""
return self.descriptor.runtime.modulestore.get_course(self.course_id) # pylint: disable=no-member
@property
def is_timed_exam(self):
"""
Alias the permutation of above fields that corresponds to un-proctored timed exams
to the more clearly-named is_timed_exam
"""
return not self.is_proctored_enabled and not self.is_practice_exam and self.is_time_limited
@property
def is_proctored_exam(self):
""" Alias the is_proctored_enabled field to the more legible is_proctored_exam """
return self.is_proctored_enabled
@property
def allow_proctoring_opt_out(self):
"""
Returns true if the learner should be given the option to choose between
taking a proctored exam, or opting out to take the exam without proctoring.
"""
return self._get_course().allow_proctoring_opt_out
@is_proctored_exam.setter
def is_proctored_exam(self, value):
""" Alias the is_proctored_enabled field to the more legible is_proctored_exam """
self.is_proctored_enabled = value
@XBlock.wants('proctoring')
@XBlock.wants('verification')
@XBlock.wants('milestones')
@XBlock.wants('credit')
@XBlock.needs('user')
@XBlock.needs('bookmarks')
class SequenceModule(SequenceFields, ProctoringFields, XModule):
"""
Layout module which lays out content in a temporal sequence
"""
js = {
'js': [resource_string(__name__, 'js/src/sequence/display.js')],
}
css = {
'scss': [resource_string(__name__, 'css/sequence/display.scss')],
}
js_module_name = "Sequence"
def __init__(self, *args, **kwargs):
super(SequenceModule, self).__init__(*args, **kwargs)
# If position is specified in system, then use that instead.
position = getattr(self.system, 'position', None)
if position is not None:
assert isinstance(position, int)
self.position = self.system.position
def get_progress(self):
''' Return the total progress, adding total done and total available.
(assumes that each submodule uses the same "units" for progress.)
'''
# TODO: Cache progress or children array?
children = self.get_children()
progresses = [child.get_progress() for child in children]
progress = reduce(Progress.add_counts, progresses, None)
return progress
def handle_ajax(self, dispatch, data): # TODO: bounds checking
''' get = request.POST instance '''
if dispatch == 'goto_position':
# set position to default value if either 'position' argument not
# found in request or it is a non-positive integer
position = data.get('position', u'1')
if position.isdigit() and int(position) > 0:
self.position = int(position)
else:
self.position = 1
return json.dumps({'success': True})
raise NotFoundError('Unexpected dispatch type')
@classmethod
def verify_current_content_visibility(cls, date, hide_after_date):
"""
Returns whether the content visibility policy passes
for the given date and hide_after_date values and
the current date-time.
"""
return (
not date or
not hide_after_date or
datetime.now(UTC) < date
)
def student_view(self, context):
context = context or {}
self._capture_basic_metrics()
banner_text = None
special_html_view = self._hidden_content_student_view(context) or self._special_exam_student_view()
if special_html_view:
masquerading_as_specific_student = context.get('specific_masquerade', False)
banner_text, special_html = special_html_view
if special_html and not masquerading_as_specific_student:
return Fragment(special_html)
else:
banner_text = self._gated_content_staff_banner()
return self._student_view(context, banner_text)
def _special_exam_student_view(self):
"""
Checks whether this sequential is a special exam. If so, returns
a banner_text or the fragment to display depending on whether
staff is masquerading.
"""
if self.is_time_limited:
special_exam_html = self._time_limited_student_view()
if special_exam_html:
banner_text = _("This exam is hidden from the learner.")
return banner_text, special_exam_html
def _hidden_content_student_view(self, context):
"""
Checks whether the content of this sequential is hidden from the
runtime user. If so, returns a banner_text or the fragment to
display depending on whether staff is masquerading.
"""
course = self._get_course()
if not self._can_user_view_content(course):
if course.self_paced:
banner_text = _("Because the course has ended, this assignment is hidden from the learner.")
else:
banner_text = _("Because the due date has passed, this assignment is hidden from the learner.")
hidden_content_html = self.system.render_template(
'hidden_content.html',
{
'self_paced': course.self_paced,
'progress_url': context.get('progress_url'),
}
)
return banner_text, hidden_content_html
def _gated_content_staff_banner(self):
"""
Checks whether the content is gated for learners. If so,
returns a banner_text depending on whether user is staff.
"""
milestones_service = self.runtime.service(self, 'milestones')
if milestones_service:
content_milestones = milestones_service.get_course_content_milestones(
self.course_id, self.location, 'requires'
)
banner_text = _('This subsection is unlocked for learners when they meet the prerequisite requirements.')
if content_milestones and self.runtime.user_is_staff:
return banner_text
def _can_user_view_content(self, course):
"""
Returns whether the runtime user can view the content
of this sequential.
"""
hidden_date = course.end if course.self_paced else self.due
return (
self.runtime.user_is_staff or
self.verify_current_content_visibility(hidden_date, self.hide_after_due)
)
def is_user_authenticated(self, context):
        # NOTE (CCB): We default to true to maintain the behavior in place prior to allowing anonymous access.
return context.get('user_authenticated', True)
def _student_view(self, context, banner_text=None):
"""
Returns the rendered student view of the content of this
sequential. If banner_text is given, it is added to the
content.
"""
display_items = self.get_display_items()
self._update_position(context, len(display_items))
fragment = Fragment()
params = {
'items': self._render_student_view_for_items(context, display_items, fragment),
'element_id': self.location.html_id(),
'item_id': self.location.to_deprecated_string(),
'position': self.position,
'tag': self.location.category,
'ajax_url': self.system.ajax_url,
'next_url': context.get('next_url'),
'prev_url': context.get('prev_url'),
'banner_text': banner_text,
'disable_navigation': not self.is_user_authenticated(context),
}
fragment.add_content(self.system.render_template("seq_module.html", params))
self._capture_full_seq_item_metrics(display_items)
self._capture_current_unit_metrics(display_items)
return fragment
def _update_position(self, context, number_of_display_items):
"""
Update the user's sequential position given the context and the
number_of_display_items
"""
position = context.get('position')
if position:
self.position = position
# If we're rendering this sequence, but no position is set yet,
# or exceeds the length of the displayable items,
# default the position to the first element
if context.get('requested_child') == 'first':
self.position = 1
elif context.get('requested_child') == 'last':
self.position = number_of_display_items or 1
elif self.position is None or self.position > number_of_display_items:
self.position = 1
def _render_student_view_for_items(self, context, display_items, fragment):
"""
Updates the given fragment with rendered student views of the given
display_items. Returns a list of dict objects with information about
the given display_items.
"""
is_user_authenticated = self.is_user_authenticated(context)
bookmarks_service = self.runtime.service(self, 'bookmarks')
context['username'] = self.runtime.service(self, 'user').get_current_user().opt_attrs.get(
'edx-platform.username')
display_names = [
self.get_parent().display_name_with_default,
self.display_name_with_default
]
contents = []
for item in display_items:
# NOTE (CCB): This seems like a hack, but I don't see a better method of determining the type/category.
item_type = item.get_icon_class()
usage_id = item.scope_ids.usage_id
if item_type == 'problem' and not is_user_authenticated:
log.info(
'Problem [%s] was not rendered because anonymous access is not allowed for graded content',
usage_id
)
continue
show_bookmark_button = False
is_bookmarked = False
if is_user_authenticated:
show_bookmark_button = True
is_bookmarked = bookmarks_service.is_bookmarked(usage_key=usage_id)
context['show_bookmark_button'] = show_bookmark_button
context['bookmarked'] = is_bookmarked
rendered_item = item.render(STUDENT_VIEW, context)
fragment.add_frag_resources(rendered_item)
iteminfo = {
'content': rendered_item.content,
'page_title': getattr(item, 'tooltip_title', ''),
'type': item_type,
'id': usage_id.to_deprecated_string(),
'bookmarked': is_bookmarked,
'path': " > ".join(display_names + [item.display_name_with_default]),
}
contents.append(iteminfo)
return contents
def _locations_in_subtree(self, node):
"""
The usage keys for all descendants of an XBlock/XModule as a flat list.
Includes the location of the node passed in.
"""
stack = [node]
locations = []
while stack:
curr = stack.pop()
locations.append(curr.location)
if curr.has_children:
stack.extend(curr.get_children())
return locations
def _capture_basic_metrics(self):
"""
Capture basic information about this sequence in New Relic.
"""
if not newrelic:
return
newrelic.agent.add_custom_parameter('seq.block_id', unicode(self.location))
newrelic.agent.add_custom_parameter('seq.display_name', self.display_name or '')
newrelic.agent.add_custom_parameter('seq.position', self.position)
newrelic.agent.add_custom_parameter('seq.is_time_limited', self.is_time_limited)
def _capture_full_seq_item_metrics(self, display_items):
"""
Capture information about the number and types of XBlock content in
the sequence as a whole. We send this information to New Relic so that
we can do better performance analysis of courseware.
"""
if not newrelic:
return
# Basic count of the number of Units (a.k.a. VerticalBlocks) we have in
# this learning sequence
newrelic.agent.add_custom_parameter('seq.num_units', len(display_items))
# Count of all modules (leaf nodes) in this sequence (e.g. videos,
# problems, etc.) The units (verticals) themselves are not counted.
all_item_keys = self._locations_in_subtree(self)
newrelic.agent.add_custom_parameter('seq.num_items', len(all_item_keys))
# Count of all modules by block_type (e.g. "video": 2, "discussion": 4)
block_counts = collections.Counter(usage_key.block_type for usage_key in all_item_keys)
for block_type, count in block_counts.items():
newrelic.agent.add_custom_parameter('seq.block_counts.{}'.format(block_type), count)
def _capture_current_unit_metrics(self, display_items):
"""
Capture information about the current selected Unit within the Sequence.
"""
if not newrelic:
return
# Positions are stored with indexing starting at 1. If we get into a
# weird state where the saved position is out of bounds (e.g. the
# content was changed), avoid going into any details about this unit.
if 1 <= self.position <= len(display_items):
# Basic info about the Unit...
current = display_items[self.position - 1]
newrelic.agent.add_custom_parameter('seq.current.block_id', unicode(current.location))
newrelic.agent.add_custom_parameter('seq.current.display_name', current.display_name or '')
# Examining all items inside the Unit (or split_test, conditional, etc.)
child_locs = self._locations_in_subtree(current)
newrelic.agent.add_custom_parameter('seq.current.num_items', len(child_locs))
curr_block_counts = collections.Counter(usage_key.block_type for usage_key in child_locs)
for block_type, count in curr_block_counts.items():
newrelic.agent.add_custom_parameter('seq.current.block_counts.{}'.format(block_type), count)
def _time_limited_student_view(self):
"""
Delegated rendering of a student view when in a time
limited view. This ultimately calls down into edx_proctoring
pip installed djangoapp
"""
# None = no overridden view rendering
view_html = None
proctoring_service = self.runtime.service(self, 'proctoring')
credit_service = self.runtime.service(self, 'credit')
verification_service = self.runtime.service(self, 'verification')
# Is this sequence designated as a Timed Examination, which includes
# Proctored Exams
feature_enabled = (
proctoring_service and
credit_service and
self.is_time_limited
)
if feature_enabled:
user_id = self.runtime.user_id
user_role_in_course = 'staff' if self.runtime.user_is_staff else 'student'
course_id = self.runtime.course_id
content_id = self.location
context = {
'display_name': self.display_name,
'default_time_limit_mins': (
self.default_time_limit_minutes if
self.default_time_limit_minutes else 0
),
'is_practice_exam': self.is_practice_exam,
'allow_proctoring_opt_out': self.allow_proctoring_opt_out,
'due_date': self.due
}
# inject the user's credit requirements and fulfillments
if credit_service:
credit_state = credit_service.get_credit_state(user_id, course_id)
if credit_state:
context.update({
'credit_state': credit_state
})
# inject verification status
if verification_service:
verification_status, __ = verification_service.get_status(user_id)
context.update({
'verification_status': verification_status,
'reverify_url': verification_service.reverify_url(),
})
# See if the edx-proctoring subsystem wants to present
# a special view to the student rather
# than the actual sequence content
#
# This will return None if there is no
# overridden view to display given the
# current state of the user
view_html = proctoring_service.get_student_view(
user_id=user_id,
course_id=course_id,
content_id=content_id,
context=context,
user_role=user_role_in_course
)
return view_html
def get_icon_class(self):
child_classes = set(child.get_icon_class()
for child in self.get_children())
new_class = 'other'
for c in class_priority:
if c in child_classes:
new_class = c
return new_class
class SequenceDescriptor(SequenceFields, ProctoringFields, MakoModuleDescriptor, XmlDescriptor):
"""
A Sequence's Descriptor object
"""
mako_template = 'widgets/sequence-edit.html'
module_class = SequenceModule
resources_dir = None
show_in_read_only_mode = True
js = {
'coffee': [resource_string(__name__, 'js/src/sequence/edit.coffee')],
}
js_module_name = "SequenceDescriptor"
@classmethod
def definition_from_xml(cls, xml_object, system):
children = []
for child in xml_object:
try:
child_block = system.process_xml(etree.tostring(child, encoding='unicode'))
children.append(child_block.scope_ids.usage_id)
except Exception as e:
log.exception("Unable to load child when parsing Sequence. Continuing...")
if system.error_tracker is not None:
system.error_tracker(u"ERROR: {0}".format(e))
continue
return {}, children
def definition_to_xml(self, resource_fs):
xml_object = etree.Element('sequential')
for child in self.get_children():
self.runtime.add_block_as_child_node(child, xml_object)
return xml_object
@property
def non_editable_metadata_fields(self):
"""
`is_entrance_exam` should not be editable in the Studio settings editor.
"""
non_editable_fields = super(SequenceDescriptor, self).non_editable_metadata_fields
non_editable_fields.append(self.fields['is_entrance_exam'])
return non_editable_fields
def index_dictionary(self):
"""
Return dictionary prepared with module content and type for indexing.
"""
# return key/value fields in a Python dict object
# values may be numeric / string or dict
# default implementation is an empty dict
xblock_body = super(SequenceDescriptor, self).index_dictionary()
html_body = {
"display_name": self.display_name,
}
if "content" in xblock_body:
xblock_body["content"].update(html_body)
else:
xblock_body["content"] = html_body
xblock_body["content_type"] = "Sequence"
return xblock_body
class HighlightsFields(object):
"""Only Sections have summaries now, but we may expand that later."""
highlights = List(
help=_("A list summarizing what students should look forward to in this section."),
scope=Scope.settings
)
class SectionModule(HighlightsFields, SequenceModule):
"""Module for a Section/Chapter."""
class SectionDescriptor(HighlightsFields, SequenceDescriptor):
"""Descriptor for a Section/Chapter."""
module_class = SectionModule
| agpl-3.0 | 5,548,470,805,675,175,000 | 36.793375 | 117 | 0.611661 | false |
jerryhongthreefoldphotos/messagebus-python-sdk | messagebus/samples/webhooks/deletewebhook.py | 1 | 1669 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2014 Message Bus
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
import sys
import os
path = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..'))
if not path in sys.path:
sys.path.insert(1, path)
del path
from messagebus import MessageBusWebhooksClient, MessageBusResponseError
api_key = '7215ee9c7d9dc229d2921a40e899ec5f'
webhook_key = '2ff80e9159b517704ce43f0f74e6e247'
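# Sample credentials: replace with your own API key and the key of the webhook to delete.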
def delete_webhook():
try:
webhook_client = MessageBusWebhooksClient(api_key)
results = webhook_client.delete_webhook(webhook_key)
except MessageBusResponseError, error:
raise error
else:
print "Successfully deleted webhook %s" % webhook_key
def get_webhooks():
try:
webhooks_client = MessageBusWebhooksClient(api_key)
results = webhooks_client.get_webhooks()
except MessageBusResponseError, error:
print error.message
else:
for webhook in results['webhooks']:
print "Webhook Key: %s" % webhook['webhook_key']
if __name__ == '__main__':
get_webhooks()
delete_webhook()
get_webhooks()
| apache-2.0 | -6,661,766,429,502,467,000 | 28.280702 | 75 | 0.696824 | false |
sauhaardac/training | nets/eric/smallmodels/SqueezeNetSqueezeLSTM.py | 1 | 4506 | """SqueezeNet 1.1 modified for LSTM regression."""
import logging
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Variable
logging.basicConfig(filename='training.log', level=logging.DEBUG)
# from Parameters import ARGS
class Fire(nn.Module): # pylint: disable=too-few-public-methods
"""Implementation of Fire module"""
def __init__(self, inplanes, squeeze_planes,
expand1x1_planes, expand3x3_planes):
"""Sets up layers for Fire module"""
super(Fire, self).__init__()
self.inplanes = inplanes
self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
self.squeeze_activation = nn.ReLU(inplace=True)
self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes,
kernel_size=1)
self.expand1x1_activation = nn.ReLU(inplace=True)
self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes,
kernel_size=3, padding=1)
self.expand3x3_activation = nn.ReLU(inplace=True)
def forward(self, input_data):
"""Forward-propagates data through Fire module"""
output_data = self.squeeze_activation(self.squeeze(input_data))
return torch.cat([
self.expand1x1_activation(self.expand1x1(output_data)),
self.expand3x3_activation(self.expand3x3(output_data))
], 1)
class SqueezeNetSqueezeLSTM(nn.Module): # pylint: disable=too-few-public-methods
"""SqueezeNet+LSTM for end to end autonomous driving"""
def __init__(self, n_steps=10, n_frames=2):
"""Sets up layers"""
super(SqueezeNetSqueezeLSTM, self).__init__()
self.n_frames = n_frames
self.n_steps = n_steps
self.pre_metadata_features = nn.Sequential(
nn.Conv2d(3 * 2 * self.n_frames, 16, kernel_size=3, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(16, 6, 12, 12)
)
self.post_metadata_features = nn.Sequential(
Fire(36, 8, 16, 16),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(32, 12, 24, 24),
Fire(48, 12, 24, 24),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(48, 16, 32, 32),
Fire(64, 16, 32, 32),
Fire(64, 24, 48, 48),
Fire(96, 24, 48, 48),
)
final_conv = nn.Conv2d(96, self.n_steps * 2, kernel_size=1)
self.pre_lstm_output = nn.Sequential(
nn.Dropout(p=0.5),
final_conv,
nn.AvgPool2d(kernel_size=3, stride=2),
)
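        # Stack of small LSTMs refines the per-timestep features; the final LSTM emits
        # 4 values for each of the n_steps predicted timesteps.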
self.lstms = nn.ModuleList([
nn.LSTM(16, 32, 1, batch_first=True),
nn.LSTM(32, 8, 1, batch_first=True),
nn.LSTM(8, 16, 1, batch_first=True),
nn.LSTM(16, 4, 1, batch_first=True)
])
for mod in self.modules():
if isinstance(mod, nn.Conv2d):
if mod is final_conv:
init.normal(mod.weight.data, mean=0.0, std=0.01)
else:
init.kaiming_uniform(mod.weight.data)
if mod.bias is not None:
mod.bias.data.zero_()
def forward(self, camera_data, metadata):
"""Forward-propagates data through SqueezeNetSqueezeLSTM"""
net_output = self.pre_metadata_features(camera_data)
net_output = torch.cat((net_output, metadata), 1)
net_output = self.post_metadata_features(net_output)
net_output = self.pre_lstm_output(net_output)
net_output = net_output.view(net_output.size(0), self.n_steps, -1)
for lstm in self.lstms:
net_output = lstm(net_output)[0]
net_output = net_output.contiguous().view(net_output.size(0), -1)
return net_output
def num_params(self):
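        # Total trainable parameters: sum over all parameter tensors of the product of their dimensions.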
return sum([reduce(lambda x, y: x * y, [dim for dim in p.size()], 1) for p in self.parameters()])
def unit_test():
"""Tests SqueezeNetSqueezeLSTM for size constitency"""
test_net = SqueezeNetSqueezeLSTM(20, 6)
test_net_output = test_net(
Variable(torch.randn(5, 36, 94, 168)),
Variable(torch.randn(5, 12, 23, 41)))
logging.debug('Net Test Output = {}'.format(test_net_output))
logging.debug('Network was Unit Tested')
print(test_net.num_params())
# for param in test_net.parameters():
unit_test()
Net = SqueezeNetSqueezeLSTM
| mit | 4,046,708,033,570,522,600 | 37.194915 | 105 | 0.590546 | false |
h-mayorquin/camp_india_2016 | tutorials/LTPinnetworks2/Step3c_Zenke_etal_2014.py | 1 | 8540 | #!/usr/bin/env python
'''
Based on:
Zenke, Friedemann, Everton J. Agnes, and Wulfram Gerstner.
"Diverse Synaptic Plasticity Mechanisms Orchestrated to Form and Retrieve Memories in Spiking Neural Networks."
Nature Communications 6 (April 21, 2015).
Part of Zenke's rule embedded in modified Brunel 2000 / Ostojic 2014 network
author: Aditya Gilra, Jun 2016.
in Brian2rc3 for CAMP 2016.
'''
#import modules and functions to be used
from brian2 import * # importing brian also does:
                        # 'from pylab import *', which brings matplotlib-style plotting
                        # commands into the namespace; np. (numpy) and mpl. (matplotlib)
                        # are also available
from data_utils import *
stand_alone = True
if stand_alone: set_device('cpp_standalone', build_on_run=False)
else:
#prefs.codegen.target = 'numpy'
#prefs.codegen.target = 'weave'
prefs.codegen.target = 'cython'
import random
import time
np.random.seed(100) # set seed for reproducibility of simulations
random.seed(100) # set seed for reproducibility of simulations
# ###########################################
# Simulation parameters
# ###########################################
simdt = 0.1*ms
simtime = 10*second
defaultclock.dt = simdt # set Brian's sim time step
dt = simdt/second # convert to value in seconds
# ###########################################
# Neuron model
# ###########################################
# equation: dv/dt=(1/taum)*(-(v-el))
# with spike when v>vt, reset to vr
vt = 20.*mV # Spiking threshold
taum = 20.*ms # Membrane time constant
vr = 10.*mV # Reset potential
muext0 = 24*mV # external input to each neuron
taur = 0.5*ms # Refractory period
taudelay = 0.75*ms # synaptic delay
eqs_neurons='''
muext : volt
dv/dt=-v/taum + muext/taum : volt
'''
# ###########################################
# Network parameters: numbers
# ###########################################
N = 4096+1024 # Total number of neurons
fexc = 0.8 # Fraction of exc neurons
NE = int(fexc*N) # Number of excitatory cells
NI = N-NE # Number of inhibitory cells
# ###########################################
# Network parameters: synapses
# ###########################################
rescale = 2 # rescale C and J to maintain total input
C = 1000/rescale # Number of incoming connections on each neuron (exc or inh)
J = 0.01*mV*rescale # exc strength is J (in mV as we add to voltage)
# Critical J is ~ 0.45 mV in paper for N = 10000, C = 1000
g = 5.0                 # -gJ is the inh strength. For exc-inh balance g >~ f/(1-f) = 4
# ###########################################
# Network parameters: synaptic plasticity
# ###########################################
wmax = 10. # hard bound on synaptic weight
Apre_tau = 20*ms # STDP Apre LTP time constant; tauplus
Apost_tau = 20*ms # STDP Apost LTD time constant; tauminus
Apre0 = 1.0 # incr in Apre, on pre-spikes; Aplus for LTP
# at spike coincidence, delta w = -Apre0*eta
Apost0 = 1.0 # incr in Apost on post-spikes; Aminus for LTD
eta = 5e-2 # learning rate
Apostslow0 = 1.0 # incr in Apostslow on post spike
Apostslow_tau = 100*ms
stdp_eqns = ''' wsyn : 1
dApre/dt=-Apre/Apre_tau : 1 (event-driven)
dApost/dt=-Apost/Apost_tau : 1 (event-driven)
'''
w0 = 1.0 # reference weight
beta = 50 # LTP decay factor
alpha = 5 # LTD curvature factor
pre_eqns = 'Apre+=Apre0; wsyn = clip(wsyn - Apost*log(1+wsyn/w0*alpha)/log(1+alpha), 0,inf)'
post_eqns = 'Apost+=Apost0; wsyn = clip(wsyn + Apre*exp(-wsyn/w0/beta), 0,inf)'
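# Weight updates above: a pre-spike depresses wsyn by the post trace Apost, scaled by the
# soft bound log(1 + wsyn/w0*alpha)/log(1+alpha); a post-spike potentiates wsyn by the pre
# trace Apre, attenuated by exp(-wsyn/w0/beta) so LTP saturates for strong synapses.
# clip(..., 0, inf) keeps weights non-negative.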
# ###########################################
# Initialize neuron (sub)groups
# ###########################################
P=NeuronGroup(N,model=eqs_neurons,\
threshold='v>=vt',reset='v=vr',refractory=taur,method='euler')
P.v = uniform(0.,vt/mV,N)*mV
PE = P[:NE]
PI = P[NE:]
# ###########################################
# Connecting the network
# ###########################################
sparseness = C/float(N)
# E to E connections
#conEE = Synapses(PE,PE,'wsyn:1',on_pre='v_post+=wsyn*J',method='euler')
conEE = Synapses(PE,PE,stdp_eqns,\
on_pre=pre_eqns,on_post=post_eqns,\
method='euler')
#conEE.connect(condition='i!=j',p=sparseness)
# need exact connection indices for weight monitor in standalone mode
conEE_idxs_pre = []
conEE_idxs_post = []
Ce = int(fexc*C)
for k in range(NE):
conEE_idxs_pre.extend(Ce*[k])
idxs = range(NE)
idxs.remove(k) # no autapses i.e. no self-connections
l = np.random.permutation(idxs)[:Ce]
conEE_idxs_post.extend(l)
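# Classify EE synapses for the weight monitor: the first 400 exc neurons form the assembly;
# 'assembly' = assembly->assembly synapses, 'cross' = assembly->background, 'bgnd' = background->background.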
conEE_idxs_assembly = where(array(conEE_idxs_post)[:Ce*400]<400)[0]
conEE_idxs_cross = where(array(conEE_idxs_post)[:Ce*400]>400)[0]
conEE_idxs_bgnd = where(array(conEE_idxs_post)[Ce*400:]>400)[0]
conEE.connect(i=conEE_idxs_pre,j=conEE_idxs_post)
conEE.delay = taudelay
conEE.wsyn = 1.
# E to I connections
conIE = Synapses(PE,PI,'wsyn:1',on_pre='v_post+=wsyn*J',method='euler')
conIE.connect(p=sparseness)
conIE.delay = taudelay
conIE.wsyn = 1
# I to E connections
conEI = Synapses(PI,PE,'wsyn:1',on_pre='v_post+=wsyn*J',method='euler')
conEI.connect(p=sparseness)
conEI.delay = taudelay
conEI.wsyn = -g
# I to I connections
conII = Synapses(PI,PI,'wsyn:1',on_pre='v_post+=wsyn*J',method='euler')
conII.connect(condition='i!=j',p=sparseness)
conII.delay = taudelay
conII.wsyn = -g
# ###########################################
# Stimuli
# ###########################################
P.muext = muext0
## 400 neurons (~10%) receive stimulus current to increase firing
#Pstim = P[:400]
#Pstim.muext = muext0 + 7*mV
# ###########################################
# Setting up monitors
# ###########################################
Nmon = N
sm = SpikeMonitor(P)
# Population monitor
popm = PopulationRateMonitor(P)
# voltage monitor
sm_vm = StateMonitor(P,'v',record=range(10)+range(NE,NE+10))
# weights monitor
wm = StateMonitor(conEE,'wsyn', record=range(Ce*NE), dt=simtime/20.)
# ###########################################
# Simulate
# ###########################################
print "Setup complete, running for",simtime,"at dt =",dt,"s."
t1 = time.time()
run(simtime,report='text')
device.build(directory='output', compile=True, run=True, debug=False)
print 'inittime + runtime, t = ', time.time() - t1
#print "For g,J =",g,J,"mean exc rate =",\
# sm_e.num_spikes/float(NE)/(simtime/second),'Hz.'
#print "For g,J =",g,J,"mean inh rate =",\
# sm_i.num_spikes/float(NI)/(simtime/second),'Hz.'
# ###########################################
# Make plots
# ###########################################
# always convert spikemon.t and spikemon.i to array-s before indexing
# spikemon.i[] indexing is extremely slow!
spiket = array(sm.t/second) # take spiketimes of all neurons
spikei = array(sm.i)
fig = figure()
# raster plot
subplot(231)
plot(sm.t,sm.i,',')
title(str(N)+" exc & inh neurons")
xlim([simtime/second-1,simtime/second])
xlabel("")
print "plotting firing rates"
subplot(232)
tau=50e-3
sigma = tau/2.
# firing rates
timeseries = arange(0,simtime/second+dt,dt)
rate = np.zeros(int(simtime/simdt))
for nrni in range(400):
rate += rate_from_spiketrain(spiket,spikei,simtime/second,sigma,dt,nrni)
plot(timeseries[:len(rate)],rate/400.,'r')
rate = np.zeros(int(simtime/simdt))
for nrni in range(400,800):
rate += rate_from_spiketrain(spiket,spikei,simtime/second,sigma,dt,nrni)
plot(timeseries[:len(rate)],rate/400.,'b')
title("exc rates: assembly (r), bgnd (b)")
ylabel("Hz")
ylim(0,300)
subplot(233)
hist(wm.wsyn[:,-1],bins=500,edgecolor='none')
xlabel('weight')
ylabel('count')
subplot(235)
num_to_plot = 10
for nrni in range(NE,NE+num_to_plot):
rate = rate_from_spiketrain(spiket,spikei,simtime/second,sigma,dt,nrni)
plot(timeseries[:len(rate)],rate)
#print mean(rate),len(sm_i[nrni])
#rates.append(rate)
title(str(num_to_plot)+" inh rates")
ylim(0,300)
#print "Mean rate = ",mean(rates)
xlabel("time (s)")
ylabel("Hz")
print "plotting weights"
subplot(236)
plot(wm.t/second,mean(wm.wsyn[conEE_idxs_assembly,:],axis=0),color='r')
plot(wm.t/second,mean(wm.wsyn[conEE_idxs_cross,:],axis=0),color='m')
plot(wm.t/second,mean(wm.wsyn[conEE_idxs_bgnd,:],axis=0),color='b')
title("assembly weights (cross=m)")
ylabel("arb")
xlabel("time (s)")
print conEE.wsyn
fig.tight_layout()
show()
| mit | -5,000,495,323,026,433,000 | 30.865672 | 112 | 0.586885 | false |
mhnatiuk/phd_sociology_of_religion | scrapper/build/cffi/demo/readdir2.py | 1 | 1574 | # A Linux-only demo, using verify() instead of hard-coding the exact layouts
#
import sys
from cffi import FFI
if not sys.platform.startswith('linux'):
raise Exception("Linux-only demo")
ffi = FFI()
ffi.cdef("""
typedef ... DIR;
struct dirent {
unsigned char d_type; /* type of file; not supported
by all file system types */
char d_name[...]; /* filename */
...;
};
int readdir_r(DIR *dirp, struct dirent *entry, struct dirent **result);
int openat(int dirfd, const char *pathname, int flags);
DIR *fdopendir(int fd);
int closedir(DIR *dirp);
static const int DT_DIR;
""")
ffi.C = ffi.verify("""
#ifndef _ATFILE_SOURCE
# define _ATFILE_SOURCE
#endif
#ifndef _BSD_SOURCE
# define _BSD_SOURCE
#endif
#include <fcntl.h>
#include <sys/types.h>
#include <dirent.h>
""")
def walk(basefd, path):
print '{', path
dirfd = ffi.C.openat(basefd, path, 0)
if dirfd < 0:
# error in openat()
return
dir = ffi.C.fdopendir(dirfd)
dirent = ffi.new("struct dirent *")
result = ffi.new("struct dirent **")
while True:
if ffi.C.readdir_r(dir, dirent, result):
# error in readdir_r()
break
if result[0] == ffi.NULL:
break
name = ffi.string(dirent.d_name)
print '%3d %s' % (dirent.d_type, name)
if dirent.d_type == ffi.C.DT_DIR and name != '.' and name != '..':
walk(dirfd, name)
ffi.C.closedir(dir)
print '}'
walk(-1, "/tmp")
| gpl-2.0 | -2,870,046,567,128,240,000 | 22.848485 | 76 | 0.562897 | false |
B-MOOC/edx-platform | openedx/core/djangoapps/credit/migrations/0013_add_provider_status_url.py | 1 | 12404 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CreditProvider.provider_status_url'
db.add_column('credit_creditprovider', 'provider_status_url',
self.gf('django.db.models.fields.URLField')(default='', max_length=200),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CreditProvider.provider_status_url'
db.delete_column('credit_creditprovider', 'provider_status_url')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'credit.creditcourse': {
'Meta': {'object_name': 'CreditCourse'},
'course_key': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'credit.crediteligibility': {
'Meta': {'unique_together': "(('username', 'course'),)", 'object_name': 'CreditEligibility'},
'course': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'eligibilities'", 'to': "orm['credit.CreditCourse']"}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'deadline': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2016, 6, 26, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'credit.creditprovider': {
'Meta': {'object_name': 'CreditProvider'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'enable_integration': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'provider_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'provider_status_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'}),
'provider_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'})
},
'credit.creditrequest': {
'Meta': {'unique_together': "(('username', 'course', 'provider'),)", 'object_name': 'CreditRequest'},
'course': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'credit_requests'", 'to': "orm['credit.CreditCourse']"}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'parameters': ('jsonfield.fields.JSONField', [], {}),
'provider': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'credit_requests'", 'to': "orm['credit.CreditProvider']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '255'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
},
'credit.creditrequirement': {
'Meta': {'unique_together': "(('namespace', 'name', 'course'),)", 'object_name': 'CreditRequirement'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'course': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'credit_requirements'", 'to': "orm['credit.CreditCourse']"}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'criteria': ('jsonfield.fields.JSONField', [], {}),
'display_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'namespace': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'credit.creditrequirementstatus': {
'Meta': {'unique_together': "(('username', 'requirement'),)", 'object_name': 'CreditRequirementStatus'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'reason': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'requirement': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'statuses'", 'to': "orm['credit.CreditRequirement']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'credit.historicalcreditrequest': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalCreditRequest'},
'course': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'+'", 'null': 'True', 'on_delete': 'models.DO_NOTHING', 'to': "orm['credit.CreditCourse']"}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'parameters': ('jsonfield.fields.JSONField', [], {}),
'provider': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'+'", 'null': 'True', 'on_delete': 'models.DO_NOTHING', 'to': "orm['credit.CreditProvider']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '255'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'})
},
'credit.historicalcreditrequirementstatus': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalCreditRequirementStatus'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'reason': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'requirement': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'+'", 'null': 'True', 'on_delete': 'models.DO_NOTHING', 'to': "orm['credit.CreditRequirement']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
}
}
complete_apps = ['credit']
| agpl-3.0 | -8,845,472,775,627,292,000 | 80.605263 | 212 | 0.567156 | false |
ahaberlie/MetPy | tests/plots/test_declarative.py | 1 | 18565 | # Copyright (c) 2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Test the simplified plotting interface."""
from datetime import datetime, timedelta
from io import BytesIO
import warnings
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib
import pandas as pd
import pytest
from traitlets import TraitError
import xarray as xr
from metpy.cbook import get_test_data
from metpy.io import GiniFile
from metpy.plots import (BarbPlot, ContourPlot, FilledContourPlot, ImagePlot, MapPanel,
PanelContainer, PlotObs)
# Fixtures to make sure we have the right backend
from metpy.testing import set_agg_backend # noqa: F401, I202
from metpy.units import units
MPL_VERSION = matplotlib.__version__[:3]
@pytest.mark.mpl_image_compare(remove_text=True,
tolerance={'2.0': 3.09}.get(MPL_VERSION, 0.005))
def test_declarative_image():
"""Test making an image plot."""
data = xr.open_dataset(GiniFile(get_test_data('NHEM-MULTICOMP_1km_IR_20151208_2100.gini')))
img = ImagePlot()
img.data = data.metpy.parse_cf('IR')
img.colormap = 'Greys_r'
panel = MapPanel()
panel.title = 'Test'
panel.plots = [img]
pc = PanelContainer()
pc.panel = panel
pc.draw()
assert panel.ax.get_title() == 'Test'
return pc.figure
@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.022)
def test_declarative_contour():
"""Test making a contour plot."""
data = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))
contour = ContourPlot()
contour.data = data
contour.field = 'Temperature'
contour.level = 700 * units.hPa
contour.contours = 30
contour.linewidth = 1
contour.linecolor = 'red'
panel = MapPanel()
panel.area = 'us'
panel.proj = 'lcc'
panel.layers = ['coastline', 'borders', 'usstates']
panel.plots = [contour]
pc = PanelContainer()
pc.size = (8, 8)
pc.panels = [panel]
pc.draw()
return pc.figure
@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.035)
def test_declarative_contour_options():
"""Test making a contour plot."""
data = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))
contour = ContourPlot()
contour.data = data
contour.field = 'Temperature'
contour.level = 700 * units.hPa
contour.contours = 30
contour.linewidth = 1
contour.linecolor = 'red'
contour.linestyle = 'dashed'
contour.clabels = True
panel = MapPanel()
panel.area = 'us'
panel.proj = 'lcc'
panel.layers = ['coastline', 'borders', 'usstates']
panel.plots = [contour]
pc = PanelContainer()
pc.size = (8, 8)
pc.panels = [panel]
pc.draw()
return pc.figure
@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.016)
def test_declarative_events():
"""Test that resetting traitlets properly propagates."""
data = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))
contour = ContourPlot()
contour.data = data
contour.field = 'Temperature'
contour.level = 850 * units.hPa
contour.contours = 30
contour.linewidth = 1
contour.linecolor = 'red'
img = ImagePlot()
img.data = data
img.field = 'v_wind'
img.level = 700 * units.hPa
img.colormap = 'hot'
img.image_range = (3000, 5000)
panel = MapPanel()
panel.area = 'us'
panel.proj = 'lcc'
panel.layers = ['coastline', 'borders', 'states']
panel.plots = [contour, img]
pc = PanelContainer()
pc.size = (8, 8)
pc.panels = [panel]
pc.draw()
# Update some properties to make sure it regenerates the figure
contour.linewidth = 2
contour.linecolor = 'green'
contour.level = 700 * units.hPa
contour.field = 'Specific_humidity'
img.field = 'Geopotential_height'
img.colormap = 'plasma'
img.colorbar = 'horizontal'
return pc.figure
def test_no_field_error():
"""Make sure we get a useful error when the field is not set."""
data = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))
contour = ContourPlot()
contour.data = data
contour.level = 700 * units.hPa
with pytest.raises(ValueError):
contour.draw()
def test_no_field_error_barbs():
"""Make sure we get a useful error when the field is not set."""
data = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))
barbs = BarbPlot()
barbs.data = data
barbs.level = 700 * units.hPa
with pytest.raises(TraitError):
barbs.draw()
def test_projection_object():
"""Test that we can pass a custom map projection."""
data = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))
contour = ContourPlot()
contour.data = data
contour.level = 700 * units.hPa
contour.field = 'Temperature'
panel = MapPanel()
panel.area = (-110, -60, 25, 55)
panel.projection = ccrs.Mercator()
panel.layers = [cfeature.LAKES]
panel.plots = [contour]
pc = PanelContainer()
pc.panel = panel
pc.draw()
return pc.figure
@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.016)
def test_colorfill():
"""Test that we can use ContourFillPlot."""
data = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))
contour = FilledContourPlot()
contour.data = data
contour.level = 700 * units.hPa
contour.field = 'Temperature'
contour.colormap = 'coolwarm'
contour.colorbar = 'vertical'
panel = MapPanel()
panel.area = (-110, -60, 25, 55)
panel.layers = [cfeature.STATES]
panel.plots = [contour]
pc = PanelContainer()
pc.panel = panel
pc.size = (12, 8)
pc.draw()
return pc.figure
@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.016)
def test_colorfill_horiz_colorbar():
"""Test that we can use ContourFillPlot."""
data = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))
contour = FilledContourPlot()
contour.data = data
contour.level = 700 * units.hPa
contour.field = 'Temperature'
contour.colormap = 'coolwarm'
contour.colorbar = 'horizontal'
panel = MapPanel()
panel.area = (-110, -60, 25, 55)
panel.layers = [cfeature.STATES]
panel.plots = [contour]
pc = PanelContainer()
pc.panel = panel
pc.size = (8, 8)
pc.draw()
return pc.figure
@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.016)
def test_colorfill_no_colorbar():
"""Test that we can use ContourFillPlot."""
data = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))
contour = FilledContourPlot()
contour.data = data
contour.level = 700 * units.hPa
contour.field = 'Temperature'
contour.colormap = 'coolwarm'
contour.colorbar = None
panel = MapPanel()
panel.area = (-110, -60, 25, 55)
panel.layers = [cfeature.STATES]
panel.plots = [contour]
pc = PanelContainer()
pc.panel = panel
pc.size = (8, 8)
pc.draw()
return pc.figure
@pytest.mark.mpl_image_compare(remove_text=True, tolerance=1.23)
def test_global():
"""Test that we can set global extent."""
data = xr.open_dataset(GiniFile(get_test_data('NHEM-MULTICOMP_1km_IR_20151208_2100.gini')))
img = ImagePlot()
img.data = data
img.field = 'IR'
img.colorbar = None
panel = MapPanel()
panel.area = 'global'
panel.plots = [img]
pc = PanelContainer()
pc.panel = panel
pc.draw()
return pc.figure
@pytest.mark.mpl_image_compare(remove_text=True)
@pytest.mark.xfail(xr.__version__ < '0.11.0', reason='Does not work with older xarray.')
def test_latlon():
"""Test our handling of lat/lon information."""
data = xr.open_dataset(get_test_data('irma_gfs_example.nc', as_file_obj=False))
img = ImagePlot()
img.data = data
img.field = 'Temperature_isobaric'
img.level = 500 * units.hPa
img.time = datetime(2017, 9, 5, 15, 0, 0)
img.colorbar = None
contour = ContourPlot()
contour.data = data
contour.field = 'Geopotential_height_isobaric'
contour.level = img.level
contour.time = img.time
panel = MapPanel()
panel.projection = 'lcc'
panel.area = 'us'
panel.plots = [img, contour]
pc = PanelContainer()
pc.panel = panel
pc.draw()
return pc.figure
@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.37)
def test_declarative_barb_options():
"""Test making a contour plot."""
data = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))
barb = BarbPlot()
barb.data = data
barb.level = 300 * units.hPa
barb.field = ['u_wind', 'v_wind']
barb.skip = (10, 10)
barb.color = 'blue'
barb.pivot = 'tip'
barb.barblength = 6.5
panel = MapPanel()
panel.area = 'us'
panel.projection = 'data'
panel.layers = ['coastline', 'borders', 'usstates']
panel.plots = [barb]
pc = PanelContainer()
pc.size = (8, 8)
pc.panels = [panel]
pc.draw()
return pc.figure
@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.612)
def test_declarative_barb_earth_relative():
"""Test making a contour plot."""
import numpy as np
data = xr.open_dataset(get_test_data('NAM_test.nc', as_file_obj=False))
contour = ContourPlot()
contour.data = data
contour.field = 'Geopotential_height_isobaric'
contour.level = 300 * units.hPa
contour.linecolor = 'red'
contour.linestyle = '-'
contour.linewidth = 2
contour.contours = np.arange(0, 20000, 120).tolist()
barb = BarbPlot()
barb.data = data
barb.level = 300 * units.hPa
barb.time = datetime(2016, 10, 31, 12)
barb.field = ['u-component_of_wind_isobaric', 'v-component_of_wind_isobaric']
barb.skip = (5, 5)
barb.color = 'black'
barb.barblength = 6.5
barb.earth_relative = False
panel = MapPanel()
panel.area = (-124, -72, 20, 53)
panel.projection = 'lcc'
panel.layers = ['coastline', 'borders', 'usstates']
panel.plots = [contour, barb]
pc = PanelContainer()
pc.size = (8, 8)
pc.panels = [panel]
pc.draw()
return pc.figure
@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.346)
def test_declarative_barb_gfs():
"""Test making a contour plot."""
data = xr.open_dataset(get_test_data('GFS_test.nc', as_file_obj=False))
barb = BarbPlot()
barb.data = data
barb.level = 300 * units.hPa
barb.field = ['u-component_of_wind_isobaric', 'v-component_of_wind_isobaric']
barb.skip = (2, 2)
barb.earth_relative = False
panel = MapPanel()
panel.area = 'us'
panel.projection = 'data'
panel.layers = ['coastline', 'borders', 'usstates']
panel.plots = [barb]
pc = PanelContainer()
pc.size = (8, 8)
pc.panels = [panel]
pc.draw()
barb.level = 700 * units.hPa
return pc.figure
@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.022)
def test_declarative_sfc_obs():
"""Test making a surface observation plot."""
data = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False),
infer_datetime_format=True, parse_dates=['valid'])
obs = PlotObs()
obs.data = data
obs.time = datetime(1993, 3, 12, 12)
obs.time_window = timedelta(minutes=15)
obs.level = None
obs.fields = ['tmpf']
obs.color = ['black']
# Panel for plot with Map features
panel = MapPanel()
panel.layout = (1, 1, 1)
panel.projection = ccrs.PlateCarree()
panel.area = 'in'
panel.layers = ['states']
panel.plots = [obs]
# Bringing it all together
pc = PanelContainer()
pc.size = (10, 10)
pc.panels = [panel]
pc.draw()
return pc.figure
@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.022)
def test_declarative_sfc_obs_changes():
"""Test making a surface observation plot, changing the field."""
data = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False),
infer_datetime_format=True, parse_dates=['valid'])
obs = PlotObs()
obs.data = data
obs.time = datetime(1993, 3, 12, 12)
obs.level = None
obs.fields = ['tmpf']
obs.colors = ['black']
obs.time_window = timedelta(minutes=15)
# Panel for plot with Map features
panel = MapPanel()
panel.layout = (1, 1, 1)
panel.projection = ccrs.PlateCarree()
panel.area = 'in'
panel.layers = ['states']
panel.plots = [obs]
panel.title = f'Surface Observations for {obs.time}'
# Bringing it all together
pc = PanelContainer()
pc.size = (10, 10)
pc.panels = [panel]
pc.draw()
obs.fields = ['dwpf']
obs.colors = ['green']
return pc.figure
@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0)
def test_declarative_colored_barbs():
"""Test making a surface plot with a colored barb (gh-1274)."""
data = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False),
infer_datetime_format=True, parse_dates=['valid'])
obs = PlotObs()
obs.data = data
obs.time = datetime(1993, 3, 12, 13)
obs.level = None
obs.vector_field = ('uwind', 'vwind')
obs.vector_field_color = 'red'
obs.reduce_points = .5
# Panel for plot with Map features
panel = MapPanel()
panel.layout = (1, 1, 1)
panel.projection = ccrs.PlateCarree()
panel.area = 'NE'
panel.layers = ['states']
panel.plots = [obs]
# Bringing it all together
pc = PanelContainer()
pc.size = (10, 10)
pc.panels = [panel]
pc.draw()
return pc.figure
@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.022)
def test_declarative_sfc_obs_full():
"""Test making a full surface observation plot."""
data = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False),
infer_datetime_format=True, parse_dates=['valid'])
obs = PlotObs()
obs.data = data
obs.time = datetime(1993, 3, 12, 13)
obs.time_window = timedelta(minutes=15)
obs.level = None
obs.fields = ['tmpf', 'dwpf', 'emsl', 'cloud_cover', 'wxsym']
obs.locations = ['NW', 'SW', 'NE', 'C', 'W']
obs.colors = ['red', 'green', 'black', 'black', 'blue']
obs.formats = [None, None, lambda v: format(10 * v, '.0f')[-3:], 'sky_cover',
'current_weather']
obs.vector_field = ('uwind', 'vwind')
obs.reduce_points = 1
# Panel for plot with Map features
panel = MapPanel()
panel.layout = (1, 1, 1)
panel.area = (-124, -72, 20, 53)
panel.area = 'il'
panel.projection = ccrs.PlateCarree()
panel.layers = ['coastline', 'borders', 'states']
panel.plots = [obs]
# Bringing it all together
pc = PanelContainer()
pc.size = (10, 10)
pc.panels = [panel]
pc.draw()
return pc.figure
@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.08)
def test_declarative_upa_obs():
"""Test making a full upperair observation plot."""
data = pd.read_csv(get_test_data('UPA_obs.csv', as_file_obj=False))
obs = PlotObs()
obs.data = data
obs.time = datetime(1993, 3, 14, 0)
obs.level = 500 * units.hPa
obs.fields = ['temperature', 'dewpoint', 'height']
obs.locations = ['NW', 'SW', 'NE']
obs.formats = [None, None, lambda v: format(v, '.0f')[:3]]
obs.vector_field = ('u_wind', 'v_wind')
obs.vector_field_length = 7
obs.reduce_points = 0
# Panel for plot with Map features
panel = MapPanel()
panel.layout = (1, 1, 1)
panel.area = (-124, -72, 20, 53)
panel.projection = 'lcc'
panel.layers = ['coastline', 'borders', 'states', 'land']
panel.plots = [obs]
# Bringing it all together
pc = PanelContainer()
pc.size = (15, 10)
pc.panels = [panel]
pc.draw()
obs.level = 300 * units.hPa
return pc.figure
def test_attribute_error_time():
"""Make sure we get a useful error when the time variable is not found."""
data = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False),
infer_datetime_format=True, parse_dates=['valid'])
data.rename(columns={'valid': 'vtime'}, inplace=True)
obs = PlotObs()
obs.data = data
obs.time = datetime(1993, 3, 12, 12)
obs.level = None
obs.fields = ['tmpf']
obs.time_window = timedelta(minutes=15)
# Panel for plot with Map features
panel = MapPanel()
panel.layout = (1, 1, 1)
panel.projection = ccrs.PlateCarree()
panel.area = 'in'
panel.layers = ['states']
panel.plots = [obs]
panel.title = f'Surface Observations for {obs.time}'
# Bringing it all together
pc = PanelContainer()
pc.size = (10, 10)
pc.panels = [panel]
with pytest.raises(AttributeError):
pc.draw()
def test_attribute_error_station():
"""Make sure we get a useful error when the station variable is not found."""
data = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False),
infer_datetime_format=True, parse_dates=['valid'])
data.rename(columns={'station': 'location'}, inplace=True)
obs = PlotObs()
obs.data = data
obs.time = datetime(1993, 3, 12, 12)
obs.level = None
obs.fields = ['tmpf']
obs.time_window = timedelta(minutes=15)
# Panel for plot with Map features
panel = MapPanel()
panel.layout = (1, 1, 1)
panel.projection = ccrs.PlateCarree()
panel.area = 'in'
panel.layers = ['states']
panel.plots = [obs]
panel.title = f'Surface Observations for {obs.time}'
# Bringing it all together
pc = PanelContainer()
pc.size = (10, 10)
pc.panels = [panel]
with pytest.raises(AttributeError):
pc.draw()
def test_save():
"""Test that our saving function works."""
pc = PanelContainer()
fobj = BytesIO()
pc.save(fobj, format='png')
fobj.seek(0)
# Test that our file object had something written to it.
assert fobj.read()
def test_show():
"""Test that show works properly."""
pc = PanelContainer()
# Matplotlib warns when using show with Agg
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
pc.show()
def test_panel():
"""Test the functionality of the panel property."""
panel = MapPanel()
pc = PanelContainer()
pc.panels = [panel]
assert pc.panel is panel
pc.panel = panel
assert pc.panel is panel
| bsd-3-clause | -1,550,168,224,563,722,000 | 25.984012 | 95 | 0.627525 | false |
CWSL/access-cm-tools | analyse/overall_mean.py | 1 | 1945 | #!/usr/bin/env python
from __future__ import print_function
import sys
import os
import argparse
import netCDF4 as nc
import numpy as np
from lib_mean import create_output_file, calc_overall_mean
"""
What this script does:
Calculate overall means.
How to run this script:
./overall_mean.py 01/ocean.nc 02/ocean.nc out.nc --vars temp
Which is equivalent to:
ncra 01/ocean.nc 02/ocean.nc out.nc
Don't use this script unless ncra doesn't work for some reason. For example on raijin ncra
will often do this:
[nah599@r974 gfdl_nyf_1080_2]$ ncra -v temp archive/output00*/ocean.nc temp_mean.nc
nco_err_exit(): ERROR Short NCO-generated message (usually name of function that triggered error): nco_put_vara()
nco_err_exit(): ERROR Error code is -101. Translation into English with nc_strerror(-101) is "NetCDF: HDF error"
nco_err_exit(): ERROR NCO will now exit with system call exit(EXIT_FAILURE)
Segmentation fault
"""
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input_files', nargs='+', help="""
The input data files in NetCDF format. These files can
be given in any order. They MUST appear before any other
arguments/options.""")
parser.add_argument('output_file', help="""The name of the output file.""")
parser.add_argument('--vars', default=[], nargs='+',
help='A list of the variables to average.')
parser.add_argument('--copy_vars', default=[], nargs='+',
help="""A list of the variables to copy across but not
included in the averaging.""")
args = parser.parse_args()
create_output_file(args.input_files[0], args.vars + args.copy_vars,
args.output_file)
calc_overall_mean(args.input_files, args.vars, args.output_file)
return 0
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | -9,120,558,180,579,979,000 | 31.966102 | 113 | 0.6509 | false |
aldwyn/effigia | apps/interactions/migrations/0001_initial.py | 1 | 2400 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-16 06:34
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('text', models.TextField()),
('object_id', models.PositiveIntegerField()),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Like',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('object_id', models.PositiveIntegerField()),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
('liker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='liked', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
| mit | 5,328,829,740,072,552,000 | 46.058824 | 149 | 0.625417 | false |
markgw/jazzparser | src/jazzparser/taggers/segmidi/chordclass/train.py | 1 | 31904 | """Unsupervised EM training for chordclass HMM tagging model.
"""
"""
============================== License ========================================
Copyright (C) 2008, 2010-12 University of Edinburgh, Mark Granroth-Wilding
This file is part of The Jazz Parser.
The Jazz Parser is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The Jazz Parser is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with The Jazz Parser. If not, see <http://www.gnu.org/licenses/>.
============================ End license ======================================
"""
__author__ = "Mark Granroth-Wilding <[email protected]>"
import numpy, os, signal
from numpy import ones, float64, sum as array_sum, zeros, log2, \
add as array_add, subtract as array_subtract
import cPickle as pickle
from multiprocessing import Pool
from jazzparser.utils.nltk.probability import mle_estimator, logprob, add_logs, \
sum_logs, prob_dist_to_dictionary_prob_dist, \
cond_prob_dist_to_dictionary_cond_prob_dist
from jazzparser.utils.options import ModuleOption
from jazzparser.utils.system import get_host_info_string
from jazzparser.utils.strings import str_to_bool
from jazzparser import settings
from jazzparser.taggers.segmidi.chordclass.hmm import ChordClassHmm
from jazzparser.taggers.segmidi.midi import midi_to_emission_stream
from nltk.probability import ConditionalProbDist, FreqDist, \
ConditionalFreqDist, DictionaryProbDist, \
DictionaryConditionalProbDist, MutableProbDist
# Small quantity added to every probability to ensure we never get zeros
ADD_SMALL = 1e-10
def _sequence_updates(sequence, last_model, label_dom, schema_ids,
emission_cond_ids, update_initial=True, catch_interrupt=False):
"""
Evaluates the forward/backward probability matrices for a
single sequence under the model that came from the previous
iteration and returns matrices that contain the updates
to be made to the distributions during this iteration.
This is wrapped up in a function so it can be run in
parallel for each sequence. Once all sequences have been
    evaluated, the results are combined and the model updated.
@type update_initial: bool
@param update_initial: usually you want to update all distributions,
including the initial state distribution. If update_initial=False,
the initial state distribution updates won't be made for this sequence.
We want this when the sequence is actually a non-initial fragment of
a longer sequence
@type catch_interrupt: bool
@param catch_interrupt: catch KeyboardInterrupt exceptions and return
None. This is useful behaviour when calling this in a process pool,
since it allows the parent process to handle the interrupt, but should
be set to False (default) if calling directly.
"""
try:
# Get the sizes we'll need for the matrix
num_schemata = len(last_model.schemata)
num_root_changes = 12
num_chord_classes = len(last_model.chord_classes)
num_emission_conds = len(emission_cond_ids)
num_emissions = 12
T = len(sequence)
state_ids = dict([(state,id) for (id,state) in \
enumerate(last_model.label_dom)])
# Local versions of the matrices store the accumulated values
# for just this sequence (so we can normalize before adding
# to the global matrices)
# The numerators
schema_trans = zeros((num_schemata,num_schemata+1), float64)
root_trans = zeros((num_schemata,num_schemata,num_root_changes), float64)
ems = zeros((num_emission_conds,num_emissions), float64)
sinit = zeros(num_schemata, float64)
# Compute the forward and backward probabilities
# These are normalized, but that makes no difference to the outcome of
# compute_gamma and compute_xi
alpha,scale,seq_logprob = last_model.normal_forward_probabilities(sequence, array=True)
beta,scale = last_model.normal_backward_probabilities(sequence, array=True)
# gamma contains the state occupation probability for each state at each
# timestep
gamma = last_model.compute_gamma(sequence, forward=alpha, backward=beta)
# xi contains the probability of every state transition at every timestep
xi = last_model.compute_xi(sequence, forward=alpha, backward=beta)
# Update the initial state distribution if requested
if update_initial:
for state in label_dom:
schema, root, chord_class = state
schema_i = schema_ids[schema]
# Add this contribution to the sum of the states with this schema
sinit[schema_i] += gamma[0][state_ids[state]]
for time in range(T):
for state in label_dom:
schema, root, chord_class = state
schema_i = schema_ids[schema]
state_i = state_ids[state]
if time < T-1:
# Go through all possible pairs of states to update the
# transition distributions
for next_state in label_dom:
next_schema, next_root, next_chord_class = next_state
schema_j = schema_ids[next_schema]
state_j = state_ids[next_state]
## Transition dist update ##
root_change = (next_root - root) % 12
schema_trans[schema_i][schema_j] += \
xi[time][state_i][state_j]
root_trans[schema_i][schema_j][root_change] += \
xi[time][state_i][state_j]
else:
# Final state: update the probs of transitioning to end
schema_trans[schema_i][num_schemata] += gamma[T-1][state_i]
## Emission dist update ##
# Add the state occupation probability to the emission numerator
# for every note
for pc,beat in sequence[time]:
# Take the pitch class relative to the root
rel_pc = (pc - root) % 12
ems[emission_cond_ids[(chord_class,beat)]][rel_pc] += \
gamma[time][state_i]
# Calculate the denominators
schema_trans_denom = array_sum(schema_trans, axis=1)
root_trans_denom = array_sum(root_trans, axis=2)
ems_denom = array_sum(ems, axis=1)
# This should come to 1.0
sinit_denom = array_sum(sinit)
# Wrap this all up in a tuple to return to the master
return (schema_trans, root_trans, ems, sinit, \
schema_trans_denom, root_trans_denom, ems_denom, sinit_denom, \
seq_logprob)
except KeyboardInterrupt:
if catch_interrupt:
return
else:
raise
## End of pool operation _sequence_updates
class ChordClassBaumWelchTrainer(object):
"""
Class with methods to retrain a chordclass model using the Baum-Welch
EM algorithm.
Module options must be processed already - we do that in the
ChordClassTaggerModel, not here.
@todo: Inherit from the
L{jazzparser.utils.nltk.ngram.baumwelch.BaumWelchTrainer}. Currently,
the generic trainer duplicates a lot of this code, since it was based on
it.
"""
# These will be included in the training options
OPTIONS = [
ModuleOption('max_iterations', filter=int,
help_text="Number of training iterations to give up after "\
"if we don't reach convergence before.",
usage="max_iterations=N, where N is an integer", default=100),
ModuleOption('convergence_logprob', filter=float,
help_text="Difference in overall log probability of the "\
"training data made by one iteration after which we "\
"consider the training to have converged.",
usage="convergence_logprob=X, where X is a small floating "\
"point number (e.g. 1e-3)", default=1e-3),
ModuleOption('split', filter=int,
help_text="Limits the length of inputs by splitting them into "\
"fragments of at most this length. The initial state "\
"distribution will only be updated for the initial fragments.",
usage="split=X, where X is an int"),
ModuleOption('truncate', filter=int,
help_text="Limits the length of inputs by truncating them to this "\
"number of timesteps. Truncation is applied before splitting.",
usage="truncate=X, where X is an int"),
ModuleOption('save_intermediate', filter=str_to_bool,
help_text="Save the model between iterations",
usage="save_intermediate=B, where B is 'true' or 'false' "\
"(default true)",
default=True),
ModuleOption('trainprocs', filter=int,
help_text="Number of processes to spawn during training. Use -1 "\
"to spawn a process for every sequence.",
usage="trainprocs=P, where P is an integer",
default=1),
]
def __init__(self, model, options={}):
self.model = model
self.options = options
def train(self, emissions, logger=None, save_callback=None):
"""
Performs unsupervised training using Baum-Welch EM.
This is performed on a model that has already been initialized.
You might, for example, create such a model using
L{jazzparser.taggers.segmidi.chordclass.hmm.ChordClassHmm.initialize_chord_classes}.
This is based on the training procedure in NLTK for HMMs:
C{nltk.tag.hmm.HiddenMarkovModelTrainer.train_unsupervised}.
@type emissions: L{jazzparser.data.input.MidiTaggerTrainingBulkInput} or
list of L{jazzparser.data.input.Input}s
@param emissions: training MIDI data
@type logger: logging.Logger
@param logger: a logger to send progress logging to
"""
if logger is None:
from jazzparser.utils.loggers import create_dummy_logger
logger = create_dummy_logger()
self.model.add_history("Beginning Baum-Welch training on %s" % get_host_info_string())
self.model.add_history("Training on %d MIDI sequences (with %s segments)" % \
(len(emissions), ", ".join("%d" % len(seq) for seq in emissions)))
logger.info("Beginning Baum-Welch training on %s" % get_host_info_string())
# Get some options out of the module options
max_iterations = self.options['max_iterations']
convergence_logprob = self.options['convergence_logprob']
split_length = self.options['split']
truncate_length = self.options['truncate']
save_intermediate = self.options['save_intermediate']
processes = self.options['trainprocs']
# Make a mutable distribution for each of the distributions
# we'll be updating
emission_mdist = cond_prob_dist_to_dictionary_cond_prob_dist(
self.model.emission_dist, mutable=True)
schema_trans_mdist = cond_prob_dist_to_dictionary_cond_prob_dist(
self.model.schema_transition_dist, mutable=True)
root_trans_mdist = cond_prob_dist_to_dictionary_cond_prob_dist(
self.model.root_transition_dist, mutable=True)
init_state_mdist = prob_dist_to_dictionary_prob_dist(
self.model.initial_state_dist, mutable=True)
# Get the sizes we'll need for the matrices
num_schemata = len(self.model.schemata)
num_root_changes = 12
num_chord_classes = len(self.model.chord_classes)
if self.model.metric:
num_emission_conds = num_chord_classes * 4
else:
num_emission_conds = num_chord_classes
num_emissions = 12
# Enumerations to use for the matrices, so we know what they mean
schema_ids = dict([(sch,i) for (i,sch) in enumerate(self.model.schemata+[None])])
if self.model.metric:
rs = range(4)
else:
rs = [0]
emission_cond_ids = dict([(cc,i) for (i,cc) in enumerate(\
sum([[
(str(cclass.name),r) for r in rs] for cclass in self.model.chord_classes],
[]))])
# Construct a model using these mutable distributions so we can
# evaluate using them
model = ChordClassHmm(schema_trans_mdist,
root_trans_mdist,
emission_mdist,
self.model.emission_number_dist,
init_state_mdist,
self.model.schemata,
self.model.chord_class_mapping,
self.model.chord_classes,
metric=self.model.metric,
illegal_transitions=self.model.illegal_transitions,
fixed_root_transitions=self.model.fixed_root_transitions)
def _save():
if save_callback is None:
logger.error("Could not save model, as no callback was given")
else:
# If the writing fails, wait till I've had a chance to sort it
# out and then try again. This happens when my AFS token runs
# out
while True:
try:
save_callback()
except (IOError, OSError), err:
print "Error writing model to disk: %s. " % err
raw_input("Press <enter> to try again... ")
else:
break
########## Data preprocessing
# Preprocess the inputs so they're ready for the model training
emissions = [midi_to_emission_stream(seq,
metric=self.model.metric,
remove_empty=False)[0] \
for seq in emissions]
logger.info("%d input sequences" % len(emissions))
# Truncate long streams
if truncate_length is not None:
logger.info("Truncating sequences to max %d timesteps" % \
truncate_length)
emissions = [stream[:truncate_length] for stream in emissions]
# Split up long streams if requested
# After this, each stream is a tuple (first,stream), where first
# indicates whether the stream segment begins a song
if split_length is not None:
logger.info("Splitting sequences into max %d-sized chunks" % \
split_length)
split_emissions = []
# Split each stream
for emstream in emissions:
input_ems = emstream
splits = []
first = True
# Take bits of length split_length until we're under the max
while len(input_ems) >= split_length:
# Overlap the splits by one so we get all transitions
splits.append((first, input_ems[:split_length]))
input_ems = input_ems[split_length-1:]
first = False
# Get the last short one
if len(input_ems):
splits.append((first, input_ems))
split_emissions.extend(splits)
else:
# All streams begin a song
split_emissions = [(True,stream) for stream in emissions]
logger.info("Sequence lengths after preprocessing: %s" %
" ".join([str(len(em[1])) for em in split_emissions]))
##########
# Train the emission number distribution on this data to start with
# This doesn't get updated by the iterative steps, because it's not
# dependent on chord classes
model.train_emission_number_distribution(emissions)
logger.info("Trained emission number distribution")
# Save the model with this
if save_intermediate:
_save()
###############
# TODO: remove this section - it's for debugging
if False:
from jazzparser.prototype.baumwelch import TempBaumWelchTrainer
temptrainer = TempBaumWelchTrainer(model, self.options)
temptrainer.train(split_emissions, logger=logger)
return
###############
# Special case of -1 for number of sequences
# No point in creating more processes than there are sequences
if processes == -1 or processes > len(split_emissions):
processes = len(split_emissions)
iteration = 0
last_logprob = None
try:
while iteration < max_iterations:
logger.info("Beginning iteration %d" % iteration)
current_logprob = 0.0
### Matrices in which to accumulate new probability estimates
# trans contains new transition numerator probabilities
# TODO: update this...
# trans[s][s'][dr] = Sum_{t_n=t_(n+1), m_n=m_(n+1),c_n=c,c_(n+1)=c'}
# alpha(x_n).beta(x_(n+1)).
# p(x_(n+1)|x_n).p(y_(n+1)|x_(n+1))
schema_trans = zeros((num_schemata,num_schemata+1), float64)
root_trans = zeros((num_schemata,num_schemata,num_root_changes), float64)
# ems contains the new emission numerator probabilities
# TODO: update this...
# ems[r][d] = Sum_{d(y_n^k, x_n)=d, r_n^k=r}
# alpha(x_n).beta(x_n) /
# Sum_{x'_n} (alpha(x'_n).beta(x'_n))
ems = zeros((num_emission_conds,num_emissions), float64)
# sinit contains the initial state numerator probabilities
sinit = zeros(num_schemata, float64)
# And these are the denominators
schema_trans_denom = zeros(num_schemata, float64)
root_trans_denom = zeros((num_schemata,num_schemata), float64)
ems_denom = zeros(num_emission_conds, float64)
# It may seem silly to use a matrix for this, but it allows
# us to update it in the callback
sinit_denom = zeros(1, float64)
def _training_callback(result):
"""
Callback for the _sequence_updates processes that takes
the updates from a single sequence and adds them onto
the global update accumulators.
"""
if result is None:
# Process cancelled: do no updates
logger.warning("Child process was cancelled")
return
# _sequence_updates() returns all of this as a tuple
(schema_trans_local, root_trans_local, ems_local, sinit_local, \
schema_trans_denom_local, root_trans_denom_local, \
ems_denom_local, sinit_denom_local, \
seq_logprob) = result
# Add these probabilities from this sequence to the
# global matrices
# We don't need to scale these using the seq prob because
# they're already normalized
# Emission numerator
array_add(ems, ems_local, ems)
# Transition numerator
array_add(schema_trans, schema_trans_local, schema_trans)
array_add(root_trans, root_trans_local, root_trans)
# Initial state numerator
array_add(sinit, sinit_local, sinit)
# Denominators
array_add(ems_denom, ems_denom_local, ems_denom)
array_add(schema_trans_denom, schema_trans_denom_local, schema_trans_denom)
array_add(root_trans_denom, root_trans_denom_local, root_trans_denom)
array_add(sinit_denom, sinit_denom_local, sinit_denom)
## End of _training_callback
# Only use a process pool if there's more than one sequence
if processes > 1:
# Create a process pool to use for training
logger.info("Creating a pool of %d processes" % processes)
# catch them at this level
pool = Pool(processes=processes)
async_results = []
try:
for seq_i,(first,sequence) in enumerate(split_emissions):
logger.info("Iteration %d, sequence %d" % (iteration, seq_i))
T = len(sequence)
if T == 0:
continue
# Fire off a new call to the process pool for every sequence
async_results.append(
pool.apply_async(_sequence_updates,
(sequence, model,
self.model.label_dom,
schema_ids,
emission_cond_ids),
{ 'update_initial' : first,
'catch_interrupt' : True },
callback=_training_callback) )
pool.close()
# Wait for all the workers to complete
pool.join()
except KeyboardInterrupt:
# If Ctl+C is fired during the processing, we exit here
logger.info("Keyboard interrupt was received during EM "\
"updates")
raise
# Call get() on every AsyncResult so that any exceptions in
# workers get raised
for res in async_results:
# If there was an exception in _sequence_update, it
# will get raised here
res_tuple = res.get()
# Add this sequence's logprob into the total for all sequences
current_logprob += res_tuple[-1]
else:
if len(split_emissions) == 1:
logger.info("One sequence: not using a process pool")
else:
logger.info("Not using a process pool: training %d "\
"emission sequences sequentially" % \
len(split_emissions))
for seq_i,(first,sequence) in enumerate(split_emissions):
if len(sequence) > 0:
logger.info("Iteration %d, sequence %d" % (iteration, seq_i))
updates = _sequence_updates(
sequence, model,
self.model.label_dom,
schema_ids, emission_cond_ids,
update_initial=first)
_training_callback(updates)
# Update the overall logprob
current_logprob += updates[-1]
######## Model updates
# Update the model's probabilities from the accumulated values
# Emission distribution
for cond_tup in model.emission_dist.conditions():
cond_id = emission_cond_ids[cond_tup]
# Divide each numerator by the denominator
denom = ems_denom[cond_id]
for pc in range(12):
# Convert to log probs for update and divide by denom
prob = logprob(ems[cond_id][pc] + ADD_SMALL) - \
logprob(denom + 12*ADD_SMALL)
model.emission_dist[cond_tup].update(pc, prob)
# Transition distribution
num_trans_samples = len(self.model.schemata)
# Dist conditioned on current schema
for schema in self.model.schemata:
schema_i = schema_ids[schema]
schema_denom = schema_trans_denom[schema_i]
# Observe next schema and change of root
for next_schema in self.model.schemata:
schema_j = schema_ids[next_schema]
# Convert to log probs for update and divide by denom
prob = \
logprob(schema_trans[schema_i][schema_j] \
+ ADD_SMALL) - \
logprob(schema_denom + (num_trans_samples+1)*ADD_SMALL)
model.schema_transition_dist[schema].update(
next_schema, prob)
root_denom = root_trans_denom[schema_i][schema_j]
for root_change in range(12):
# Convert to log probs for update and divide by denom
prob = \
logprob(root_trans[schema_i][schema_j][root_change] \
+ ADD_SMALL) - \
logprob(root_denom + 12*ADD_SMALL)
model.root_transition_dist[(schema,next_schema)].update(
root_change, prob)
# Also transition to the final state
prob = \
logprob(schema_trans[schema_i][num_schemata] \
+ ADD_SMALL) - \
logprob(schema_denom + (num_trans_samples+1)*ADD_SMALL)
model.schema_transition_dist[schema].update(None, prob)
# Initial state distribution
denom = sinit_denom[0]
num_samples = len(self.model.schemata)
for schema in self.model.schemata:
schema_i = schema_ids[schema]
# Convert to log probs for update and divide by denom
prob = \
logprob(sinit[schema_i] + ADD_SMALL) - \
logprob(denom + num_samples*ADD_SMALL)
model.initial_state_dist.update(schema, prob)
# Clear the model's cache so we get the new probabilities
model.clear_cache()
logger.info("Training data log prob: %s" % current_logprob)
if last_logprob is not None and current_logprob < last_logprob:
# Drop in log probability
# This should never happen if all's working correctly
logger.error("Log probability dropped by %s" % \
(last_logprob - current_logprob))
if last_logprob is not None:
logger.info("Log prob change: %s" % \
(current_logprob - last_logprob))
# Check whether the log probability has converged
if iteration > 0 and \
abs(current_logprob - last_logprob) < convergence_logprob:
# Don't iterate any more
logger.info("Distribution has converged: ceasing training")
break
iteration += 1
last_logprob = current_logprob
# Update the main model
self.update_model(model)
# Only save if we've been asked to save between iterations
if save_intermediate:
_save()
except KeyboardInterrupt:
# Interrupted during training
self.model.add_history("Baum-Welch training interrupted after %d "\
"iterations" % iteration)
logger.warn("Baum-Welch training interrupted")
raise
except Exception, err:
# Some other error during training
self.model.add_history("Error during Baum-Welch training. Exiting "\
"after %d iterations" % iteration)
logger.error("Error during training: %s" % err)
raise
self.model.add_history("Completed Baum-Welch training (%d iterations)" \
% iteration)
logger.info("Completed Baum-Welch training (%d iterations)" % iteration)
# Update the distribution's parameters with those we've trained
self.update_model(model)
# Always save the model now that we're done
_save()
return
def update_model(self, model):
"""
Replaces the distributions of the saved model with those of the given
model and saves it.
"""
# Replicate the distributions of the source model so that we get
# non-mutable distributions to store
self.model.schema_transition_dist = \
cond_prob_dist_to_dictionary_cond_prob_dist(model.schema_transition_dist)
self.model.root_transition_dist = \
cond_prob_dist_to_dictionary_cond_prob_dist(model.root_transition_dist)
self.model.emission_dist = \
cond_prob_dist_to_dictionary_cond_prob_dist(model.emission_dist)
self.model.initial_state_dist = prob_dist_to_dictionary_prob_dist(
model.initial_state_dist)
| gpl-3.0 | 6,740,012,073,236,440,000 | 48.928013 | 95 | 0.524793 | false |
eallik/spinoff | spinoff/tests/test_pattern_matching.py | 1 | 4162 | from spinoff.util.testing import assert_not_raises
from spinoff.util.pattern_matching import match, ANY, IGNORE, IS_INSTANCE, NOT
FLATTEN = True
def NO(outlen, pattern, data):
x = match(pattern, data, flatten=FLATTEN)
assert not x[0] if isinstance(x, tuple) else not x, x
if FLATTEN:
assert isinstance(x, bool) if outlen == 0 else len(x[1:]) == outlen
else:
assert type(x[1]) is tuple
assert len(x[1]) == outlen, "length should have been %s but was %s" % (outlen, len(x[1]))
def YES(out, pattern, data):
x = match(pattern, data, flatten=FLATTEN)
if not FLATTEN:
assert x[0], "should have matched"
assert type(x[1]) is tuple
assert x[1] == out, "should have returned %s but returned %s" % (repr(out), repr(x[1]))
else:
assert x is True if out == () else x[0], "should have matched"
assert out == () or x[1:] == out, "should have returned %s but returned %s" % (repr(out), repr(x[1:]))
def test_without_flatten():
global FLATTEN
FLATTEN = False
NO(0, 'foo', 'bar')
YES((), 'foo', 'foo')
NO(0, (), 'whatev')
YES((), (), ())
YES(('whatev', ), ANY, 'whatev')
YES((('whatev', 'whatev'), ), ANY, ('whatev', 'whatev'))
YES((), ('foo',), ('foo',))
NO(0, ('whatev',), ('whatev', 'whatev'))
NO(0, ('whatev', 'whatev'), ('whatev',))
YES((('foo',), ), ANY, ('foo',))
YES(('foo',), (ANY,), ('foo',))
YES((), (IGNORE(ANY),), ('whatev',))
YES(('foo',), (ANY, IGNORE(ANY)), ('foo', 'whatev',))
YES((), (IGNORE(ANY), IGNORE(ANY)), ('whatev', 'whatev',))
YES(('foo', 'bar'), (ANY, ANY), ('foo', 'bar',))
YES((), ('foo', IGNORE(ANY)), ('foo', 'whatev',))
NO(0, ('foo', IGNORE(ANY)), ('WRONG', 'whatev',))
NO(1, ('foo', ANY), ('WRONG', 'whatev',))
YES((), ('foo', (IGNORE(ANY), )), ('foo', ('whatev', )))
YES((1, 2, 3), ('foo', (ANY, (ANY, (ANY, )))), ('foo', (1, (2, (3,)))))
YES((2, 3), ('foo', (IGNORE(ANY), (ANY, (ANY, )))), ('foo', (1, (2, (3,)))))
YES((3, ), ('foo', (IGNORE(ANY), (IGNORE(ANY), (ANY, )))), ('foo', (1, (2, (3,)))))
with assert_not_raises(ValueError):
_, (_, _, _) = match(
('foo', (ANY, (ANY, (ANY, )))),
('WRONG', (1, (2, (3,)))),
flatten=False)
with assert_not_raises(ValueError):
_, _, _, _ = match(
('foo', (ANY, (ANY, (ANY, )))),
('WRONG', (1, (2, (3,)))),
flatten=True)
def test_flatten():
global FLATTEN
FLATTEN = True
NO(0, 'foo', 'bar')
YES((), 'foo', 'foo')
YES((), (), ())
YES(('whatev',), ANY, 'whatev')
YES((('whatev', 'whatev'), ), ANY, ('whatev', 'whatev'))
YES((), ('foo',), ('foo',))
YES((('foo',),), ANY, ('foo',))
YES(('foo',), (ANY,), ('foo',))
YES((), (IGNORE(ANY),), ('whatev',))
YES(('foo',), (ANY, IGNORE(ANY)), ('foo', 'whatev',))
YES((), (IGNORE(ANY), IGNORE(ANY)), ('whatev', 'whatev',))
YES(('foo', 'bar'), (ANY, ANY), ('foo', 'bar',))
YES((), ('foo', IGNORE(ANY)), ('foo', 'whatev',))
YES((), ('foo', (IGNORE(ANY), )), ('foo', ('whatev', )))
YES((1, 2, 3,), ('foo', (ANY, (ANY, (ANY, )))), ('foo', (1, (2, (3,)))))
YES((2, 3,), ('foo', (IGNORE(ANY), (ANY, (ANY, )))), ('foo', (1, (2, (3,)))))
YES((3,), ('foo', (IGNORE(ANY), (IGNORE(ANY), (ANY, )))), ('foo', (1, (2, (3,)))))
ok, v1, v2 = match(('foo', ('bar', ANY, ('baz', ANY))),
('foo', ('bar', 123, ('baz', 456))),
flatten=True)
assert ok and (v1, v2) == (123, 456)
def test_advanced():
global FLATTEN
FLATTEN = True
assert IS_INSTANCE(int) == 1
YES((1,), IS_INSTANCE(int), 1)
YES((), IGNORE(IS_INSTANCE(int)), 1)
def test_not():
assert NOT(IS_INSTANCE(int)) == 'string'
for val in ['string', 123, True, None, object(), 123.456]:
assert NOT(ANY) != val
assert NOT(NOT(IS_INSTANCE(int))) == 3
def test_or():
assert (IS_INSTANCE(int) | IS_INSTANCE(float)) == 3
assert (IS_INSTANCE(int) | IS_INSTANCE(float)) == 3.3
assert not ((IS_INSTANCE(int) | IS_INSTANCE(float)) == 'hello')
| bsd-2-clause | 4,785,376,480,909,404,000 | 31.515625 | 110 | 0.481259 | false |
openvstorage/arakoon | pylabs/test/server/quick/test_client_lib.py | 1 | 1890 | """
Copyright (2010-2014) INCUBAID BVBA
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from Compat import X
import os
import logging
import subprocess
def test_client_lib():
my_temp = '/tmp/client_lib_test'
OCAML_LIBDIR = X.subprocess.check_output('ocamlfind printconf destdir',
shell=True)
OCAML_LIBDIR = OCAML_LIBDIR.strip()
env = os.environ.copy()
env['OCAML_LIBDIR'] = OCAML_LIBDIR
cmds = [
(['make', 'uninstall_client'], None),
(['make', 'install'], None),
(['mkdir', '-p', my_temp], None),
(['cp', './examples/ocaml/demo.ml', my_temp], None),
(['ocamlbuild', '-use-ocamlfind', '-package','lwt' ,
'-package','arakoon_client',
'-tags', 'annot,debug,thread',
'demo.native'], my_temp),
(['make', 'uninstall_client'], None),
]
for cmd, cwd in cmds:
if cwd == None:
cwd = '../..'
print cmd
try:
r = X.subprocess.check_output(cmd,
cwd = cwd,
env = env,
stderr= X.subprocess.STDOUT
)
print r
except subprocess.CalledProcessError as ex:
logging.info("ex:%s" % ex)
logging.info("output=%s" % ex.output)
raise ex
| apache-2.0 | -1,131,830,869,059,534,800 | 34 | 75 | 0.562963 | false |
Thurion/EDSM-RSE-for-EDMC | Backgroundworker.py | 1 | 2388 | """
EDSM-RSE a plugin for EDMC
Copyright (C) 2019 Sebastian Bauer
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
from threading import Thread, Timer
from BackgroundTask import TimedTask
from queue import Queue
import os
import traceback
import logging
from RseData import RseData
from config import appname
logger = logging.getLogger(f"{appname}.{os.path.basename(os.path.dirname(__file__))}")
class BackgroundWorker(Thread):
def __init__(self, queue: Queue, rse_data: RseData, interval: int = 60 * 15):
Thread.__init__(self)
self.queue = queue
self.rse_data = rse_data
self.interval = interval # in seconds
self.timer = None
def timer_task(self):
logging.debug("TimerTask triggered.")
self.timer = Timer(self.interval, self.timer_task)
self.timer.daemon = True
self.timer.start()
self.queue.put(TimedTask(self.rse_data))
def run(self):
self.rse_data.initialize()
self.timer = Timer(self.interval, self.timer_task)
self.timer.daemon = True
self.timer.start()
while True:
task = self.queue.get()
if not task:
break
else:
try:
task.execute()
except Exception as e:
logger.exception("Exception occurred in background task {bg}.".format(bg=task.__class__.__name__))
traceback.print_exc()
self.queue.task_done()
if self.timer:
logger.debug("Stopping RSE background timer.")
self.timer.cancel()
self.timer.join()
self.queue.task_done()
| gpl-2.0 | 678,412,927,906,021,000 | 32.608696 | 118 | 0.632328 | false |
ovcrash/geoip-attack-map | DataServer/syslog-gen.py | 1 | 1948 | #!/usr/bin/python3
import random, syslog
from sys import exit
from time import sleep
#syslog_path = '/var/log/syslog/'
port_list = [
0,
1,
20,
21,
22,
23,
25,
40,
43,
53,
80,
88,
109,
110,
115,
118,
143,
156,
161,
220,
389,
443,
445,
636,
1433,
1434,
3306,
3389,
5900,
5901,
5902,
5903,
8080,
9999,
]
def main():
#global syslog_path
global port_list
#with open(syslog_path, "w") as syslog_file:
while True:
port = random.choice(port_list)
syslog.syslog('{}.{}.{}.{},{}.{}.{}.{},{},{}'.format(
random.randrange(1, 256),
random.randrange(1, 256),
random.randrange(1, 256),
random.randrange(1, 256),
random.randrange(1, 256),
random.randrange(1, 256),
random.randrange(1, 256),
random.randrange(1, 256),
port,
port,
))
sleep(.1)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
exit()
| gpl-3.0 | 371,194,105,848,068,600 | 26.43662 | 85 | 0.268994 | false |
draios/python-sdc-client | examples/dashboard_backup_v1_restore_v2.py | 1 | 1709 | #!/usr/bin/env python
#
# Save each dashboard of a Sysdig Monitor v1 environment to file and then use
# create_dashboard_from_file() to import the stored dashboards into a v2 environment.
#
import sys
from sdcclient import SdMonitorClient
from sdcclient import SdMonitorClientV1
#
# Parse arguments
#
if len(sys.argv) != 5:
print((
'usage: %s <sysdig-v1-url> <sysdig-v1-token> <sysdig-v2-url> <sysdig-v2-token>'
% sys.argv[0]))
print(
'You can find your token at https://app.sysdigcloud.com/#/settings/user'
)
sys.exit(1)
sdc_v1_url = sys.argv[1]
sdc_v1_token = sys.argv[2]
sdc_v2_url = sys.argv[3]
sdc_v2_token = sys.argv[4]
#
# Instantiate the SDC client
#
sdclient_v2 = SdMonitorClient(sdc_v2_token, sdc_url=sdc_v2_url)
sdclient_v1 = SdMonitorClientV1(sdc_v1_token, sdc_url=sdc_v1_url)
#
# Serialize the first user dashboard to disk
#
ok, res = sdclient_v1.get_dashboards()
if not ok:
print(res)
sys.exit(1)
for dashboard in res['dashboards']:
file_name = '{}.json'.format(dashboard['id'])
print(('Saving v1 dashboard {} to file {}...'.format(
dashboard['name'], file_name)))
sdclient_v1.save_dashboard_to_file(dashboard, file_name)
print('Importing dashboard to v2...')
ok, res = sdclient_v2.create_dashboard_from_file(
'import of {}'.format(dashboard['name']),
file_name,
None,
shared=dashboard['isShared'],
public=dashboard['isPublic'])
if ok:
print(('Dashboard {} imported!'.format(dashboard['name'])))
sdclient_v2.delete_dashboard(res['dashboard'])
else:
print(('Dashboard {} import failed:'.format(dashboard['name'])))
print(res)
print('\n')
| mit | -6,236,592,850,024,548,000 | 25.703125 | 91 | 0.643066 | false |
cts-admin/cts | cts/members/test_admin.py | 1 | 3727 | from datetime import date, timedelta
from django.contrib import admin
from django.test import TestCase
from .admin import CorporateMemberAdmin, StatusFilter
from .models import CorporateMember
class CorporateMemberAdminTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.member = CorporateMember.objects.create(
display_name='Corporation',
billing_name='foo',
billing_email='[email protected]',
contact_email='[email protected]',
membership_level=2,
)
cls.inactive_member = CorporateMember.objects.create(
display_name='Inactive Corporation',
billing_name='inactive',
billing_email='[email protected]',
contact_email='[email protected]',
membership_level=2,
inactive=True,
)
def test_membership_expires(self):
today = date.today()
yesterday = date.today() - timedelta(days=1)
plus_thirty_one_days = today + timedelta(days=31)
modeladmin = CorporateMemberAdmin(CorporateMember, admin.site)
self.assertIsNone(modeladmin.membership_expires(self.member))
self.member.invoice_set.create(amount=500)
self.assertIsNone(modeladmin.membership_expires(self.member))
self.member.invoice_set.create(amount=500, expiration_date=yesterday)
self.assertIn('red', modeladmin.membership_expires(self.member))
self.member.invoice_set.create(amount=500, expiration_date=today)
self.assertIn('orange', modeladmin.membership_expires(self.member))
self.member.invoice_set.create(amount=500, expiration_date=plus_thirty_one_days)
self.assertIn('green', modeladmin.membership_expires(self.member))
def test_renewal_link(self):
expected_beg = '<a href="/members/corporate-membership/renew/'
        expected_end = '/"><img src="/static/admin/img/icon-changelink.svg" alt="renewal link" />'
modeladmin = CorporateMemberAdmin(CorporateMember, admin.site)
renewal_link = modeladmin.renewal_link(self.member)
self.assertTrue(renewal_link.startswith(expected_beg))
        self.assertTrue(renewal_link.endswith(expected_end))
def test_status_filter(self):
members = CorporateMember.objects.all()
filter_args = {'request': None, 'params': {}, 'model': None, 'model_admin': None}
self.assertCountEqual(
StatusFilter(**filter_args).queryset(request=None, queryset=members),
[self.member]
)
filter_args['params'] = {'status': 'inactive'}
self.assertCountEqual(
StatusFilter(**filter_args).queryset(None, CorporateMember.objects.all()),
[self.inactive_member]
)
filter_args['params'] = {'status': 'all'}
self.assertCountEqual(
StatusFilter(**filter_args).queryset(None, CorporateMember.objects.all()),
[self.member, self.inactive_member]
)
status_filter = StatusFilter(**filter_args)
self.assertEqual(
status_filter.lookups(request=None, model_admin=None),
(
(None, 'Active'),
('inactive', 'Inactive'),
('all', 'All'),
)
)
class MockChangeList:
def get_query_string(self, *args):
return ''
self.assertEqual(
list(status_filter.choices(cl=MockChangeList())),
[
{'display': 'Active', 'query_string': '', 'selected': True},
{'display': 'Inactive', 'query_string': '', 'selected': False},
{'display': 'All', 'query_string': '', 'selected': False},
]
)
| gpl-3.0 | -6,265,244,848,219,483,000 | 40.411111 | 98 | 0.613362 | false |
mapr/sahara | sahara/plugins/mapr/services/swift/swift.py | 1 | 1854 | # Copyright (c) 2015, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
import sahara.plugins.mapr.domain.service as s
import sahara.plugins.mapr.services.management.management as management
import sahara.plugins.mapr.services.maprfs.maprfs as maprfs
import sahara.utils.files as f
LOG = logging.getLogger(__name__)
@six.add_metaclass(s.Single)
class Swift(s.Service):
HADOOP_SWIFT_JAR = ('plugins/mapr/services/swift/'
'resources/hadoop-swift-latest.jar')
def __init__(self):
super(Swift, self).__init__()
self.name = 'swift'
self.ui_name = 'Swift'
self.cluster_defaults = ['swift-default.json']
def configure(self, context, instances=None):
instances = instances or context.get_instances()
file_servers = context.filter_instances(instances, maprfs.FILE_SERVER)
self._install_swift_jar(context, file_servers)
def _install_swift_jar(self, context, instances):
jar = f.get_file_text(Swift.HADOOP_SWIFT_JAR)
path = '%s/swift.jar' % context.hadoop_lib
for instance in instances:
LOG.debug("Writing swift.jar to instance %s",
instance.management_ip)
with instance.remote() as r:
r.write_file_to(path, jar, run_as_root=True)
| apache-2.0 | 4,631,164,114,704,599,000 | 35.352941 | 78 | 0.683927 | false |
tensorflow/datasets | tensorflow_datasets/image_classification/siscore/siscore.py | 1 | 4560 | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SI-SCORE synthetic dataset."""
import os
import dataclasses
import tensorflow.compat.v2 as tf
from tensorflow_datasets.image_classification.siscore import siscore_labels
import tensorflow_datasets.public_api as tfds
_CITATION = """\
@misc{djolonga2020robustness,
title={On Robustness and Transferability of Convolutional Neural Networks},
author={Josip Djolonga and Jessica Yung and Michael Tschannen and Rob Romijnders and Lucas Beyer and Alexander Kolesnikov and Joan Puigcerver and Matthias Minderer and Alexander D'Amour and Dan Moldovan and Sylvain Gelly and Neil Houlsby and Xiaohua Zhai and Mario Lucic},
year={2020},
eprint={2007.08558},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
_DESCRIPTION = """
SI-Score (Synthetic Interventions on Scenes for Robustness Evaluation) is a
dataset to evaluate robustness of image classification models to changes in
object size, location and rotation angle.
In SI-SCORE, we take objects and backgrounds and systematically vary object
size, location and rotation angle so we can study the effect of changing these
factors on model performance. The image label space is the ImageNet
label space (1k classes) for easy evaluation of models.
More information about the dataset can be found at https://github.com/google-research/si-score.
"""
_NUM_CLASSES = 61
_BASE_URL = "https://s3.us-east-1.amazonaws.com/si-score-dataset"
_VARIANT_EXPANDED_DIR_NAMES = {
"size": "area",
"rotation": "rotation",
"location": "location20_area02_min0pc",
}
@dataclasses.dataclass
class SiscoreConfig(tfds.core.BuilderConfig):
"""BuilderConfig for SI-Score.
Attributes:
variant: str. The synthetic dataset variant. One of 'rotation', 'size' and
'location'.
name: str. The name of the factor to vary (same as variant).
description: str. A brief description of the config (different from the
global dataset description).
"""
variant: str = ""
class Siscore(tfds.core.GeneratorBasedBuilder):
"""SI-Score synthetic image dataset."""
VERSION = tfds.core.Version("1.0.0")
RELEASE_NOTES = {
"1.0.0": "Initial release.",
}
BUILDER_CONFIGS = [
SiscoreConfig(variant=x, name=x, description=f"factor of variation: {x}")
for x in ["rotation", "size", "location"] # pytype: disable=wrong-keyword-args
]
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
features=tfds.features.FeaturesDict({
"image_id":
tf.int64,
"image":
tfds.features.Image(),
# ImageNet label space
"label":
tfds.features.ClassLabel(num_classes=1000),
"dataset_label":
tfds.features.ClassLabel(
names=siscore_labels.IMAGENET_LABELS_LIST),
}),
supervised_keys=("image", "label"),
# Homepage of the dataset for documentation
homepage="https://github.com/google-research/si-score",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerator."""
# using rotation link only for now
variant = self.builder_config.variant
dataset_url = "/".join((_BASE_URL, f"{variant}.zip"))
path = dl_manager.download_and_extract(dataset_url)
path = os.path.join(path, _VARIANT_EXPANDED_DIR_NAMES[variant])
return {"test": self._generate_examples(datapath=path)}
def _generate_examples(self, datapath):
"""Yields examples of synthetic data images and labels."""
for fpath in tf.io.gfile.glob(os.path.join(datapath, "*", "*.jpg")):
label = fpath.split("/")[-2]
fname = os.path.basename(fpath)
record = {
"image": fpath,
"image_id": int(fname.split(".")[0]),
"label": siscore_labels.IMAGENET_LABELS[label],
"dataset_label": siscore_labels.DATASET_LABELS[label],
}
yield fname, record
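  # Hedged usage sketch (not part of the original file): once this builder is
  # registered, the three configs defined in BUILDER_CONFIGS could be loaded
  # through the standard TFDS API, e.g.
  #   import tensorflow_datasets as tfds
  #   ds = tfds.load("siscore/rotation", split="test")  # or "siscore/size", "siscore/location"
  #   for ex in ds.take(1):
  #     print(ex["image"].shape, ex["label"], ex["dataset_label"])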
| apache-2.0 | 4,582,501,919,405,156 | 34.625 | 278 | 0.682456 | false |
allanlei/flask-countries | flask_countries/__init__.py | 1 | 23836 | ## django-countries https://bitbucket.org/smileychris/django-countries
# Temporary gettext pass thru
def _(text):
return text
# Nicely titled (and translatable) country names.
COUNTRIES = (
('AF', _(u'Afghanistan')),
('AX', _(u'\xc5land Islands')),
('AL', _(u'Albania')),
('DZ', _(u'Algeria')),
('AS', _(u'American Samoa')),
('AD', _(u'Andorra')),
('AO', _(u'Angola')),
('AI', _(u'Anguilla')),
('AQ', _(u'Antarctica')),
('AG', _(u'Antigua and Barbuda')),
('AR', _(u'Argentina')),
('AM', _(u'Armenia')),
('AW', _(u'Aruba')),
('AU', _(u'Australia')),
('AT', _(u'Austria')),
('AZ', _(u'Azerbaijan')),
('BS', _(u'Bahamas')),
('BH', _(u'Bahrain')),
('BD', _(u'Bangladesh')),
('BB', _(u'Barbados')),
('BY', _(u'Belarus')),
('BE', _(u'Belgium')),
('BZ', _(u'Belize')),
('BJ', _(u'Benin')),
('BM', _(u'Bermuda')),
('BT', _(u'Bhutan')),
('BO', _(u'Bolivia, Plurinational State of')),
('BQ', _(u'Bonaire, Sint Eustatius and Saba')),
('BA', _(u'Bosnia and Herzegovina')),
('BW', _(u'Botswana')),
('BV', _(u'Bouvet Island')),
('BR', _(u'Brazil')),
('IO', _(u'British Indian Ocean Territory')),
('BN', _(u'Brunei Darussalam')),
('BG', _(u'Bulgaria')),
('BF', _(u'Burkina Faso')),
('BI', _(u'Burundi')),
('KH', _(u'Cambodia')),
('CM', _(u'Cameroon')),
('CA', _(u'Canada')),
('CV', _(u'Cape Verde')),
('KY', _(u'Cayman Islands')),
('CF', _(u'Central African Republic')),
('TD', _(u'Chad')),
('CL', _(u'Chile')),
('CN', _(u'China')),
('CX', _(u'Christmas Island')),
('CC', _(u'Cocos (Keeling) Islands')),
('CO', _(u'Colombia')),
('KM', _(u'Comoros')),
('CG', _(u'Congo')),
('CD', _(u'Congo, The Democratic Republic of the')),
('CK', _(u'Cook Islands')),
('CR', _(u'Costa Rica')),
('CI', _(u"C\xf4te D'ivoire")),
('HR', _(u'Croatia')),
('CU', _(u'Cuba')),
('CW', _(u'Cura\xe7ao')),
('CY', _(u'Cyprus')),
('CZ', _(u'Czech Republic')),
('DK', _(u'Denmark')),
('DJ', _(u'Djibouti')),
('DM', _(u'Dominica')),
('DO', _(u'Dominican Republic')),
('EC', _(u'Ecuador')),
('EG', _(u'Egypt')),
('SV', _(u'El Salvador')),
('GQ', _(u'Equatorial Guinea')),
('ER', _(u'Eritrea')),
('EE', _(u'Estonia')),
('ET', _(u'Ethiopia')),
('FK', _(u'Falkland Islands (Malvinas)')),
('FO', _(u'Faroe Islands')),
('FJ', _(u'Fiji')),
('FI', _(u'Finland')),
('FR', _(u'France')),
('GF', _(u'French Guiana')),
('PF', _(u'French Polynesia')),
('TF', _(u'French Southern Territories')),
('GA', _(u'Gabon')),
('GM', _(u'Gambia')),
('GE', _(u'Georgia')),
('DE', _(u'Germany')),
('GH', _(u'Ghana')),
('GI', _(u'Gibraltar')),
('GR', _(u'Greece')),
('GL', _(u'Greenland')),
('GD', _(u'Grenada')),
('GP', _(u'Guadeloupe')),
('GU', _(u'Guam')),
('GT', _(u'Guatemala')),
('GG', _(u'Guernsey')),
('GN', _(u'Guinea')),
('GW', _(u'Guinea-bissau')),
('GY', _(u'Guyana')),
('HT', _(u'Haiti')),
('HM', _(u'Heard Island and McDonald Islands')),
('VA', _(u'Holy See (Vatican City State)')),
('HN', _(u'Honduras')),
('HK', _(u'Hong Kong')),
('HU', _(u'Hungary')),
('IS', _(u'Iceland')),
('IN', _(u'India')),
('ID', _(u'Indonesia')),
('IR', _(u'Iran, Islamic Republic of')),
('IQ', _(u'Iraq')),
('IE', _(u'Ireland')),
('IM', _(u'Isle of Man')),
('IL', _(u'Israel')),
('IT', _(u'Italy')),
('JM', _(u'Jamaica')),
('JP', _(u'Japan')),
('JE', _(u'Jersey')),
('JO', _(u'Jordan')),
('KZ', _(u'Kazakhstan')),
('KE', _(u'Kenya')),
('KI', _(u'Kiribati')),
('KP', _(u"Korea, Democratic People's Republic of")),
('KR', _(u'Korea, Republic of')),
('KW', _(u'Kuwait')),
('KG', _(u'Kyrgyzstan')),
('LA', _(u"Lao People's Democratic Republic")),
('LV', _(u'Latvia')),
('LB', _(u'Lebanon')),
('LS', _(u'Lesotho')),
('LR', _(u'Liberia')),
('LY', _(u'Libya')),
('LI', _(u'Liechtenstein')),
('LT', _(u'Lithuania')),
('LU', _(u'Luxembourg')),
('MO', _(u'Macao')),
('MK', _(u'Macedonia, The Former Yugoslav Republic of')),
('MG', _(u'Madagascar')),
('MW', _(u'Malawi')),
('MY', _(u'Malaysia')),
('MV', _(u'Maldives')),
('ML', _(u'Mali')),
('MT', _(u'Malta')),
('MH', _(u'Marshall Islands')),
('MQ', _(u'Martinique')),
('MR', _(u'Mauritania')),
('MU', _(u'Mauritius')),
('YT', _(u'Mayotte')),
('MX', _(u'Mexico')),
('FM', _(u'Micronesia, Federated States of')),
('MD', _(u'Moldova, Republic of')),
('MC', _(u'Monaco')),
('MN', _(u'Mongolia')),
('ME', _(u'Montenegro')),
('MS', _(u'Montserrat')),
('MA', _(u'Morocco')),
('MZ', _(u'Mozambique')),
('MM', _(u'Myanmar')),
('NA', _(u'Namibia')),
('NR', _(u'Nauru')),
('NP', _(u'Nepal')),
('NL', _(u'Netherlands')),
('NC', _(u'New Caledonia')),
('NZ', _(u'New Zealand')),
('NI', _(u'Nicaragua')),
('NE', _(u'Niger')),
('NG', _(u'Nigeria')),
('NU', _(u'Niue')),
('NF', _(u'Norfolk Island')),
('MP', _(u'Northern Mariana Islands')),
('NO', _(u'Norway')),
('OM', _(u'Oman')),
('PK', _(u'Pakistan')),
('PW', _(u'Palau')),
('PS', _(u'Palestinian Territory, Occupied')),
('PA', _(u'Panama')),
('PG', _(u'Papua New Guinea')),
('PY', _(u'Paraguay')),
('PE', _(u'Peru')),
('PH', _(u'Philippines')),
('PN', _(u'Pitcairn')),
('PL', _(u'Poland')),
('PT', _(u'Portugal')),
('PR', _(u'Puerto Rico')),
('QA', _(u'Qatar')),
('RE', _(u'R\xe9union')),
('RO', _(u'Romania')),
('RU', _(u'Russian Federation')),
('RW', _(u'Rwanda')),
('BL', _(u'Saint Barth\xe9lemy')),
('SH', _(u'Saint Helena, Ascension and Tristan Da Cunha')),
('KN', _(u'Saint Kitts and Nevis')),
('LC', _(u'Saint Lucia')),
('MF', _(u'Saint Martin (French Part)')),
('PM', _(u'Saint Pierre and Miquelon')),
('VC', _(u'Saint Vincent and the Grenadines')),
('WS', _(u'Samoa')),
('SM', _(u'San Marino')),
('ST', _(u'Sao Tome and Principe')),
('SA', _(u'Saudi Arabia')),
('SN', _(u'Senegal')),
('RS', _(u'Serbia')),
('SC', _(u'Seychelles')),
('SL', _(u'Sierra Leone')),
('SG', _(u'Singapore')),
('SX', _(u'Sint Maarten (Dutch Part)')),
('SK', _(u'Slovakia')),
('SI', _(u'Slovenia')),
('SB', _(u'Solomon Islands')),
('SO', _(u'Somalia')),
('ZA', _(u'South Africa')),
('GS', _(u'South Georgia and the South Sandwich Islands')),
('SS', _(u'South Sudan')),
('ES', _(u'Spain')),
('LK', _(u'Sri Lanka')),
('SD', _(u'Sudan')),
('SR', _(u'Suriname')),
('SJ', _(u'Svalbard and Jan Mayen')),
('SZ', _(u'Swaziland')),
('SE', _(u'Sweden')),
('CH', _(u'Switzerland')),
('SY', _(u'Syrian Arab Republic')),
('TW', _(u'Taiwan, Province of China')),
('TJ', _(u'Tajikistan')),
('TZ', _(u'Tanzania, United Republic of')),
('TH', _(u'Thailand')),
('TL', _(u'Timor-leste')),
('TG', _(u'Togo')),
('TK', _(u'Tokelau')),
('TO', _(u'Tonga')),
('TT', _(u'Trinidad and Tobago')),
('TN', _(u'Tunisia')),
('TR', _(u'Turkey')),
('TM', _(u'Turkmenistan')),
('TC', _(u'Turks and Caicos Islands')),
('TV', _(u'Tuvalu')),
('UG', _(u'Uganda')),
('UA', _(u'Ukraine')),
('AE', _(u'United Arab Emirates')),
('GB', _(u'United Kingdom')),
('US', _(u'United States')),
('UM', _(u'United States Minor Outlying Islands')),
('UY', _(u'Uruguay')),
('UZ', _(u'Uzbekistan')),
('VU', _(u'Vanuatu')),
('VE', _(u'Venezuela, Bolivarian Republic of')),
('VN', _(u'Viet Nam')),
('VG', _(u'Virgin Islands, British')),
('VI', _(u'Virgin Islands, U.S.')),
('WF', _(u'Wallis and Futuna')),
('EH', _(u'Western Sahara')),
('YE', _(u'Yemen')),
('ZM', _(u'Zambia')),
('ZW', _(u'Zimbabwe')),
)
# Nicely titled country names with duplicates for those which contain a comma
# (containing the non-comma'd version).
COUNTRIES_PLUS = (
('AF', _(u'Afghanistan')),
('AX', _(u'\xc5land Islands')),
('AL', _(u'Albania')),
('DZ', _(u'Algeria')),
('AS', _(u'American Samoa')),
('AD', _(u'Andorra')),
('AO', _(u'Angola')),
('AI', _(u'Anguilla')),
('AQ', _(u'Antarctica')),
('AG', _(u'Antigua and Barbuda')),
('AR', _(u'Argentina')),
('AM', _(u'Armenia')),
('AW', _(u'Aruba')),
('SH', _(u'Ascension and Tristan Da Cunha Saint Helena')),
('AU', _(u'Australia')),
('AT', _(u'Austria')),
('AZ', _(u'Azerbaijan')),
('BS', _(u'Bahamas')),
('BH', _(u'Bahrain')),
('BD', _(u'Bangladesh')),
('BB', _(u'Barbados')),
('BY', _(u'Belarus')),
('BE', _(u'Belgium')),
('BZ', _(u'Belize')),
('BJ', _(u'Benin')),
('BM', _(u'Bermuda')),
('BT', _(u'Bhutan')),
('VE', _(u'Bolivarian Republic of Venezuela')),
('BO', _(u'Bolivia, Plurinational State of')),
('BQ', _(u'Bonaire, Sint Eustatius and Saba')),
('BA', _(u'Bosnia and Herzegovina')),
('BW', _(u'Botswana')),
('BV', _(u'Bouvet Island')),
('BR', _(u'Brazil')),
('IO', _(u'British Indian Ocean Territory')),
('VG', _(u'British Virgin Islands')),
('BN', _(u'Brunei Darussalam')),
('BG', _(u'Bulgaria')),
('BF', _(u'Burkina Faso')),
('BI', _(u'Burundi')),
('KH', _(u'Cambodia')),
('CM', _(u'Cameroon')),
('CA', _(u'Canada')),
('CV', _(u'Cape Verde')),
('KY', _(u'Cayman Islands')),
('CF', _(u'Central African Republic')),
('TD', _(u'Chad')),
('CL', _(u'Chile')),
('CN', _(u'China')),
('CX', _(u'Christmas Island')),
('CC', _(u'Cocos (Keeling) Islands')),
('CO', _(u'Colombia')),
('KM', _(u'Comoros')),
('CG', _(u'Congo')),
('CD', _(u'Congo, The Democratic Republic of the')),
('CK', _(u'Cook Islands')),
('CR', _(u'Costa Rica')),
('CI', _(u"C\xf4te D'ivoire")),
('HR', _(u'Croatia')),
('CU', _(u'Cuba')),
('CW', _(u'Cura\xe7ao')),
('CY', _(u'Cyprus')),
('CZ', _(u'Czech Republic')),
('KP', _(u"Democratic People's Republic of Korea")),
('DK', _(u'Denmark')),
('DJ', _(u'Djibouti')),
('DM', _(u'Dominica')),
('DO', _(u'Dominican Republic')),
('EC', _(u'Ecuador')),
('EG', _(u'Egypt')),
('SV', _(u'El Salvador')),
('GQ', _(u'Equatorial Guinea')),
('ER', _(u'Eritrea')),
('EE', _(u'Estonia')),
('ET', _(u'Ethiopia')),
('FK', _(u'Falkland Islands (Malvinas)')),
('FO', _(u'Faroe Islands')),
('FM', _(u'Federated States of Micronesia')),
('FJ', _(u'Fiji')),
('FI', _(u'Finland')),
('FR', _(u'France')),
('GF', _(u'French Guiana')),
('PF', _(u'French Polynesia')),
('TF', _(u'French Southern Territories')),
('GA', _(u'Gabon')),
('GM', _(u'Gambia')),
('GE', _(u'Georgia')),
('DE', _(u'Germany')),
('GH', _(u'Ghana')),
('GI', _(u'Gibraltar')),
('GR', _(u'Greece')),
('GL', _(u'Greenland')),
('GD', _(u'Grenada')),
('GP', _(u'Guadeloupe')),
('GU', _(u'Guam')),
('GT', _(u'Guatemala')),
('GG', _(u'Guernsey')),
('GN', _(u'Guinea')),
('GW', _(u'Guinea-bissau')),
('GY', _(u'Guyana')),
('HT', _(u'Haiti')),
('HM', _(u'Heard Island and McDonald Islands')),
('VA', _(u'Holy See (Vatican City State)')),
('HN', _(u'Honduras')),
('HK', _(u'Hong Kong')),
('HU', _(u'Hungary')),
('IS', _(u'Iceland')),
('IN', _(u'India')),
('ID', _(u'Indonesia')),
('IR', _(u'Iran, Islamic Republic of')),
('IQ', _(u'Iraq')),
('IE', _(u'Ireland')),
('IR', _(u'Islamic Republic of Iran')),
('IM', _(u'Isle of Man')),
('IL', _(u'Israel')),
('IT', _(u'Italy')),
('JM', _(u'Jamaica')),
('JP', _(u'Japan')),
('JE', _(u'Jersey')),
('JO', _(u'Jordan')),
('KZ', _(u'Kazakhstan')),
('KE', _(u'Kenya')),
('KI', _(u'Kiribati')),
('KP', _(u"Korea, Democratic People's Republic of")),
('KR', _(u'Korea, Republic of')),
('KW', _(u'Kuwait')),
('KG', _(u'Kyrgyzstan')),
('LA', _(u"Lao People's Democratic Republic")),
('LV', _(u'Latvia')),
('LB', _(u'Lebanon')),
('LS', _(u'Lesotho')),
('LR', _(u'Liberia')),
('LY', _(u'Libya')),
('LI', _(u'Liechtenstein')),
('LT', _(u'Lithuania')),
('LU', _(u'Luxembourg')),
('MO', _(u'Macao')),
('MK', _(u'Macedonia, The Former Yugoslav Republic of')),
('MG', _(u'Madagascar')),
('MW', _(u'Malawi')),
('MY', _(u'Malaysia')),
('MV', _(u'Maldives')),
('ML', _(u'Mali')),
('MT', _(u'Malta')),
('MH', _(u'Marshall Islands')),
('MQ', _(u'Martinique')),
('MR', _(u'Mauritania')),
('MU', _(u'Mauritius')),
('YT', _(u'Mayotte')),
('MX', _(u'Mexico')),
('FM', _(u'Micronesia, Federated States of')),
('MD', _(u'Moldova, Republic of')),
('MC', _(u'Monaco')),
('MN', _(u'Mongolia')),
('ME', _(u'Montenegro')),
('MS', _(u'Montserrat')),
('MA', _(u'Morocco')),
('MZ', _(u'Mozambique')),
('MM', _(u'Myanmar')),
('NA', _(u'Namibia')),
('NR', _(u'Nauru')),
('NP', _(u'Nepal')),
('NL', _(u'Netherlands')),
('NC', _(u'New Caledonia')),
('NZ', _(u'New Zealand')),
('NI', _(u'Nicaragua')),
('NE', _(u'Niger')),
('NG', _(u'Nigeria')),
('NU', _(u'Niue')),
('NF', _(u'Norfolk Island')),
('MP', _(u'Northern Mariana Islands')),
('NO', _(u'Norway')),
('PS', _(u'Occupied Palestinian Territory')),
('OM', _(u'Oman')),
('PK', _(u'Pakistan')),
('PW', _(u'Palau')),
('PS', _(u'Palestinian Territory, Occupied')),
('PA', _(u'Panama')),
('PG', _(u'Papua New Guinea')),
('PY', _(u'Paraguay')),
('PE', _(u'Peru')),
('PH', _(u'Philippines')),
('PN', _(u'Pitcairn')),
('BO', _(u'Plurinational State of Bolivia')),
('PL', _(u'Poland')),
('PT', _(u'Portugal')),
('TW', _(u'Province of China Taiwan')),
('PR', _(u'Puerto Rico')),
('QA', _(u'Qatar')),
('KR', _(u'Republic of Korea')),
('MD', _(u'Republic of Moldova')),
('RE', _(u'R\xe9union')),
('RO', _(u'Romania')),
('RU', _(u'Russian Federation')),
('RW', _(u'Rwanda')),
('BL', _(u'Saint Barth\xe9lemy')),
('SH', _(u'Saint Helena, Ascension and Tristan Da Cunha')),
('KN', _(u'Saint Kitts and Nevis')),
('LC', _(u'Saint Lucia')),
('MF', _(u'Saint Martin (French Part)')),
('PM', _(u'Saint Pierre and Miquelon')),
('VC', _(u'Saint Vincent and the Grenadines')),
('WS', _(u'Samoa')),
('SM', _(u'San Marino')),
('ST', _(u'Sao Tome and Principe')),
('SA', _(u'Saudi Arabia')),
('SN', _(u'Senegal')),
('RS', _(u'Serbia')),
('SC', _(u'Seychelles')),
('SL', _(u'Sierra Leone')),
('SG', _(u'Singapore')),
('BQ', _(u'Sint Eustatius and Saba Bonaire')),
('SX', _(u'Sint Maarten (Dutch Part)')),
('SK', _(u'Slovakia')),
('SI', _(u'Slovenia')),
('SB', _(u'Solomon Islands')),
('SO', _(u'Somalia')),
('ZA', _(u'South Africa')),
('GS', _(u'South Georgia and the South Sandwich Islands')),
('SS', _(u'South Sudan')),
('ES', _(u'Spain')),
('LK', _(u'Sri Lanka')),
('SD', _(u'Sudan')),
('SR', _(u'Suriname')),
('SJ', _(u'Svalbard and Jan Mayen')),
('SZ', _(u'Swaziland')),
('SE', _(u'Sweden')),
('CH', _(u'Switzerland')),
('SY', _(u'Syrian Arab Republic')),
('TW', _(u'Taiwan, Province of China')),
('TJ', _(u'Tajikistan')),
('TZ', _(u'Tanzania, United Republic of')),
('TH', _(u'Thailand')),
('CD', _(u'The Democratic Republic of the Congo')),
('MK', _(u'The Former Yugoslav Republic of Macedonia')),
('TL', _(u'Timor-leste')),
('TG', _(u'Togo')),
('TK', _(u'Tokelau')),
('TO', _(u'Tonga')),
('TT', _(u'Trinidad and Tobago')),
('TN', _(u'Tunisia')),
('TR', _(u'Turkey')),
('TM', _(u'Turkmenistan')),
('TC', _(u'Turks and Caicos Islands')),
('TV', _(u'Tuvalu')),
('VI', _(u'U.S. Virgin Islands')),
('UG', _(u'Uganda')),
('UA', _(u'Ukraine')),
('AE', _(u'United Arab Emirates')),
('GB', _(u'United Kingdom')),
('TZ', _(u'United Republic of Tanzania')),
('US', _(u'United States')),
('UM', _(u'United States Minor Outlying Islands')),
('UY', _(u'Uruguay')),
('UZ', _(u'Uzbekistan')),
('VU', _(u'Vanuatu')),
('VE', _(u'Venezuela, Bolivarian Republic of')),
('VN', _(u'Viet Nam')),
('VG', _(u'Virgin Islands, British')),
('VI', _(u'Virgin Islands, U.S.')),
('WF', _(u'Wallis and Futuna')),
('EH', _(u'Western Sahara')),
('YE', _(u'Yemen')),
('ZM', _(u'Zambia')),
('ZW', _(u'Zimbabwe')),
)
# Official capitalized country names.
OFFICIAL_COUNTRIES = {
'AF': u'AFGHANISTAN',
'AX': u'\xc5LAND ISLANDS',
'AL': u'ALBANIA',
'DZ': u'ALGERIA',
'AS': u'AMERICAN SAMOA',
'AD': u'ANDORRA',
'AO': u'ANGOLA',
'AI': u'ANGUILLA',
'AQ': u'ANTARCTICA',
'AG': u'ANTIGUA AND BARBUDA',
'AR': u'ARGENTINA',
'AM': u'ARMENIA',
'AW': u'ARUBA',
'AU': u'AUSTRALIA',
'AT': u'AUSTRIA',
'AZ': u'AZERBAIJAN',
'BS': u'BAHAMAS',
'BH': u'BAHRAIN',
'BD': u'BANGLADESH',
'BB': u'BARBADOS',
'BY': u'BELARUS',
'BE': u'BELGIUM',
'BZ': u'BELIZE',
'BJ': u'BENIN',
'BM': u'BERMUDA',
'BT': u'BHUTAN',
'BO': u'BOLIVIA, PLURINATIONAL STATE OF',
'BQ': u'BONAIRE, SINT EUSTATIUS AND SABA',
'BA': u'BOSNIA AND HERZEGOVINA',
'BW': u'BOTSWANA',
'BV': u'BOUVET ISLAND',
'BR': u'BRAZIL',
'IO': u'BRITISH INDIAN OCEAN TERRITORY',
'BN': u'BRUNEI DARUSSALAM',
'BG': u'BULGARIA',
'BF': u'BURKINA FASO',
'BI': u'BURUNDI',
'KH': u'CAMBODIA',
'CM': u'CAMEROON',
'CA': u'CANADA',
'CV': u'CAPE VERDE',
'KY': u'CAYMAN ISLANDS',
'CF': u'CENTRAL AFRICAN REPUBLIC',
'TD': u'CHAD',
'CL': u'CHILE',
'CN': u'CHINA',
'CX': u'CHRISTMAS ISLAND',
'CC': u'COCOS (KEELING) ISLANDS',
'CO': u'COLOMBIA',
'KM': u'COMOROS',
'CG': u'CONGO',
'CD': u'CONGO, THE DEMOCRATIC REPUBLIC OF THE',
'CK': u'COOK ISLANDS',
'CR': u'COSTA RICA',
'CI': u"C\xd4TE D'IVOIRE",
'HR': u'CROATIA',
'CU': u'CUBA',
'CW': u'CURA\xc7AO',
'CY': u'CYPRUS',
'CZ': u'CZECH REPUBLIC',
'DK': u'DENMARK',
'DJ': u'DJIBOUTI',
'DM': u'DOMINICA',
'DO': u'DOMINICAN REPUBLIC',
'EC': u'ECUADOR',
'EG': u'EGYPT',
'SV': u'EL SALVADOR',
'GQ': u'EQUATORIAL GUINEA',
'ER': u'ERITREA',
'EE': u'ESTONIA',
'ET': u'ETHIOPIA',
'FK': u'FALKLAND ISLANDS (MALVINAS)',
'FO': u'FAROE ISLANDS',
'FJ': u'FIJI',
'FI': u'FINLAND',
'FR': u'FRANCE',
'GF': u'FRENCH GUIANA',
'PF': u'FRENCH POLYNESIA',
'TF': u'FRENCH SOUTHERN TERRITORIES',
'GA': u'GABON',
'GM': u'GAMBIA',
'GE': u'GEORGIA',
'DE': u'GERMANY',
'GH': u'GHANA',
'GI': u'GIBRALTAR',
'GR': u'GREECE',
'GL': u'GREENLAND',
'GD': u'GRENADA',
'GP': u'GUADELOUPE',
'GU': u'GUAM',
'GT': u'GUATEMALA',
'GG': u'GUERNSEY',
'GN': u'GUINEA',
'GW': u'GUINEA-BISSAU',
'GY': u'GUYANA',
'HT': u'HAITI',
'HM': u'HEARD ISLAND AND MCDONALD ISLANDS',
'VA': u'HOLY SEE (VATICAN CITY STATE)',
'HN': u'HONDURAS',
'HK': u'HONG KONG',
'HU': u'HUNGARY',
'IS': u'ICELAND',
'IN': u'INDIA',
'ID': u'INDONESIA',
'IR': u'IRAN, ISLAMIC REPUBLIC OF',
'IQ': u'IRAQ',
'IE': u'IRELAND',
'IM': u'ISLE OF MAN',
'IL': u'ISRAEL',
'IT': u'ITALY',
'JM': u'JAMAICA',
'JP': u'JAPAN',
'JE': u'JERSEY',
'JO': u'JORDAN',
'KZ': u'KAZAKHSTAN',
'KE': u'KENYA',
'KI': u'KIRIBATI',
'KP': u"KOREA, DEMOCRATIC PEOPLE'S REPUBLIC OF",
'KR': u'KOREA, REPUBLIC OF',
'KW': u'KUWAIT',
'KG': u'KYRGYZSTAN',
'LA': u"LAO PEOPLE'S DEMOCRATIC REPUBLIC",
'LV': u'LATVIA',
'LB': u'LEBANON',
'LS': u'LESOTHO',
'LR': u'LIBERIA',
'LY': u'LIBYA',
'LI': u'LIECHTENSTEIN',
'LT': u'LITHUANIA',
'LU': u'LUXEMBOURG',
'MO': u'MACAO',
'MK': u'MACEDONIA, THE FORMER YUGOSLAV REPUBLIC OF',
'MG': u'MADAGASCAR',
'MW': u'MALAWI',
'MY': u'MALAYSIA',
'MV': u'MALDIVES',
'ML': u'MALI',
'MT': u'MALTA',
'MH': u'MARSHALL ISLANDS',
'MQ': u'MARTINIQUE',
'MR': u'MAURITANIA',
'MU': u'MAURITIUS',
'YT': u'MAYOTTE',
'MX': u'MEXICO',
'FM': u'MICRONESIA, FEDERATED STATES OF',
'MD': u'MOLDOVA, REPUBLIC OF',
'MC': u'MONACO',
'MN': u'MONGOLIA',
'ME': u'MONTENEGRO',
'MS': u'MONTSERRAT',
'MA': u'MOROCCO',
'MZ': u'MOZAMBIQUE',
'MM': u'MYANMAR',
'NA': u'NAMIBIA',
'NR': u'NAURU',
'NP': u'NEPAL',
'NL': u'NETHERLANDS',
'NC': u'NEW CALEDONIA',
'NZ': u'NEW ZEALAND',
'NI': u'NICARAGUA',
'NE': u'NIGER',
'NG': u'NIGERIA',
'NU': u'NIUE',
'NF': u'NORFOLK ISLAND',
'MP': u'NORTHERN MARIANA ISLANDS',
'NO': u'NORWAY',
'OM': u'OMAN',
'PK': u'PAKISTAN',
'PW': u'PALAU',
'PS': u'PALESTINIAN TERRITORY, OCCUPIED',
'PA': u'PANAMA',
'PG': u'PAPUA NEW GUINEA',
'PY': u'PARAGUAY',
'PE': u'PERU',
'PH': u'PHILIPPINES',
'PN': u'PITCAIRN',
'PL': u'POLAND',
'PT': u'PORTUGAL',
'PR': u'PUERTO RICO',
'QA': u'QATAR',
'RE': u'R\xc9UNION',
'RO': u'ROMANIA',
'RU': u'RUSSIAN FEDERATION',
'RW': u'RWANDA',
'BL': u'SAINT BARTH\xc9LEMY',
'SH': u'SAINT HELENA, ASCENSION AND TRISTAN DA CUNHA',
'KN': u'SAINT KITTS AND NEVIS',
'LC': u'SAINT LUCIA',
'MF': u'SAINT MARTIN (FRENCH PART)',
'PM': u'SAINT PIERRE AND MIQUELON',
'VC': u'SAINT VINCENT AND THE GRENADINES',
'WS': u'SAMOA',
'SM': u'SAN MARINO',
'ST': u'SAO TOME AND PRINCIPE',
'SA': u'SAUDI ARABIA',
'SN': u'SENEGAL',
'RS': u'SERBIA',
'SC': u'SEYCHELLES',
'SL': u'SIERRA LEONE',
'SG': u'SINGAPORE',
'SX': u'SINT MAARTEN (DUTCH PART)',
'SK': u'SLOVAKIA',
'SI': u'SLOVENIA',
'SB': u'SOLOMON ISLANDS',
'SO': u'SOMALIA',
'ZA': u'SOUTH AFRICA',
'GS': u'SOUTH GEORGIA AND THE SOUTH SANDWICH ISLANDS',
'SS': u'SOUTH SUDAN',
'ES': u'SPAIN',
'LK': u'SRI LANKA',
'SD': u'SUDAN',
'SR': u'SURINAME',
'SJ': u'SVALBARD AND JAN MAYEN',
'SZ': u'SWAZILAND',
'SE': u'SWEDEN',
'CH': u'SWITZERLAND',
'SY': u'SYRIAN ARAB REPUBLIC',
'TW': u'TAIWAN, PROVINCE OF CHINA',
'TJ': u'TAJIKISTAN',
'TZ': u'TANZANIA, UNITED REPUBLIC OF',
'TH': u'THAILAND',
'TL': u'TIMOR-LESTE',
'TG': u'TOGO',
'TK': u'TOKELAU',
'TO': u'TONGA',
'TT': u'TRINIDAD AND TOBAGO',
'TN': u'TUNISIA',
'TR': u'TURKEY',
'TM': u'TURKMENISTAN',
'TC': u'TURKS AND CAICOS ISLANDS',
'TV': u'TUVALU',
'UG': u'UGANDA',
'UA': u'UKRAINE',
'AE': u'UNITED ARAB EMIRATES',
'GB': u'UNITED KINGDOM',
'US': u'UNITED STATES',
'UM': u'UNITED STATES MINOR OUTLYING ISLANDS',
'UY': u'URUGUAY',
'UZ': u'UZBEKISTAN',
'VU': u'VANUATU',
'VE': u'VENEZUELA, BOLIVARIAN REPUBLIC OF',
'VN': u'VIET NAM',
'VG': u'VIRGIN ISLANDS, BRITISH',
'VI': u'VIRGIN ISLANDS, U.S.',
'WF': u'WALLIS AND FUTUNA',
'EH': u'WESTERN SAHARA',
'YE': u'YEMEN',
'ZM': u'ZAMBIA',
'ZW': u'ZIMBABWE',
}
class Countries(object):
def __init__(self, app, *args, **kwargs):
if app:
self.init_app(app, *args, **kwargs)
def init_app(self, app):
self.app = app
self.app.context_processor(self.context_processor)
def context_processor(self):
return {
# 'COUNTRIES': COUNTRIES,
'COUNTRIES_PLUS': COUNTRIES_PLUS,
} | bsd-3-clause | 1,601,909,327,133,766,700 | 28.870927 | 77 | 0.468451 | false |
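# Hedged usage sketch for the Countries extension defined above; the Flask app and
# template snippet are illustrative assumptions, not part of the package:
#   from flask import Flask
#   app = Flask(__name__)
#   Countries(app)  # registers the context processor
#   # in any Jinja template:
#   # {% for code, name in COUNTRIES_PLUS %}<option value="{{ code }}">{{ name }}</option>{% endfor %}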
DavidAndreev/indico | indico_zodbimport/modules/networks.py | 1 | 2990 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
from ipaddress import ip_network
from indico.core.db import db
from indico.modules.networks.models.networks import IPNetworkGroup
from indico.util.console import cformat
from indico.util.struct.iterables import committing_iterator
from indico_zodbimport import Importer, convert_to_unicode
class NetworkImporter(Importer):
def has_data(self):
return IPNetworkGroup.has_rows()
def migrate(self):
self.migrate_networks()
def _to_network(self, mask):
mask = convert_to_unicode(mask).strip()
net = None
if re.match(r'^[0-9.]+$', mask):
# ipv4 mask
mask = mask.rstrip('.')
segments = mask.split('.')
if len(segments) <= 4:
addr = '.'.join(segments + ['0'] * (4 - len(segments)))
net = ip_network('{}/{}'.format(addr, 8 * len(segments)))
elif re.match(r'^[0-9a-f:]+', mask):
# ipv6 mask
mask = mask.rstrip(':') # there shouldn't be a `::` in the IP as it was a startswith-like check before
segments = mask.split(':')
if len(segments) <= 8:
addr = ':'.join(segments + ['0'] * (8 - len(segments)))
net = ip_network('{}/{}'.format(addr, 16 * len(segments)))
if net is None:
self.print_warning(cformat('%{yellow!}Skipped invalid mask: {}').format(mask))
return net
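# Illustrative conversions performed by _to_network (hypothetical legacy masks):
#   '128.141'    -> ip_network('128.141.0.0/16')  (two IPv4 octets -> /16)
#   '188.184.3.' -> ip_network('188.184.3.0/24')  (trailing dot stripped)
#   '2001:1458'  -> ip_network('2001:1458::/32')  (two IPv6 groups -> /32)
#   'gateway'    -> None, after the "Skipped invalid mask" warning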
def migrate_networks(self):
self.print_step('migrating networks')
for domain in committing_iterator(self._iter_domains()):
ip_networks = filter(None, map(self._to_network, set(domain.filterList)))
if not ip_networks:
self.print_warning(cformat('%{yellow}Domain has no valid IPs: {}')
.format(convert_to_unicode(domain.name)))
network = IPNetworkGroup(name=convert_to_unicode(domain.name),
description=convert_to_unicode(domain.description), networks=ip_networks)
db.session.add(network)
self.print_success(repr(network))
db.session.flush()
def _iter_domains(self):
return self.zodb_root['domains'].itervalues()
| gpl-3.0 | -2,274,356,279,102,685,200 | 40.527778 | 115 | 0.626087 | false |
joaormatos/anaconda | Anaconda/release/make_dist.py | 1 | 3494 | # Copyright (c) Mathias Kaerlev 2012.
# This file is part of Anaconda.
# Anaconda is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Anaconda is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Anaconda. If not, see <http://www.gnu.org/licenses/>.
import yaml
import os
import shutil
import subprocess
import time
import sys
from getrev import get_hg_rev
try:
COMMERCIAL = bool(int(sys.argv[1]))
except IndexError:
COMMERCIAL = True
ANACONDA_FILES = ['darwin.zip', 'linux2.zip', 'win32.zip', 'make.exe',
'png2ico.exe', 'extensions', 'extensions.dat']
RUNTIME_FILES = ['Anaconda.bld']
EXTENSION_FILES = []
EXAMPLE_FOLDERS = ['Anaconda']
TOOL_FILES = ['signhelper.py']
MMF2_DIRECTORY = 'D:\\Multimedia Fusion Developer 2\\'
OTHER_FILES = ['README.txt', 'CHANGELOG.txt']
# add custom EDIF extensions
for item in ('AnacondaShaders',):
RUNTIME_FILES.append('%s.json' % item)
RUNTIME_FILES.append('%s.png' % item)
EXTENSION_FILES.append('%s.mfx' % item)
# EXAMPLE_FOLDERS.append(item)
shutil.rmtree('tmp', ignore_errors = True)
shutil.rmtree('dist', ignore_errors = True)
os.mkdir('tmp')
os.mkdir('dist')
os.makedirs('tmp/Data/Runtime/Anaconda')
os.makedirs('tmp/Extensions')
os.makedirs('tmp/Examples')
os.makedirs('tmp/Tools/Anaconda')
runtime_directory = os.path.join(MMF2_DIRECTORY, 'Data', 'Runtime')
extensions_directory = os.path.join(MMF2_DIRECTORY, 'Extensions')
examples_directory = os.path.join(MMF2_DIRECTORY, 'Examples')
tools_directory = os.path.join('..', 'tools')
def ignore_filter(dir, items):
ignored = []
for item in items:
if item.split('.')[-1].startswith('00'):
ignored.append(item)
return ignored
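# Illustration (hypothetical listing): shutil.copytree() passes each directory listing
# through this filter, so ['Game.mfa', 'Game.001', 'Game.002'] would return
# ['Game.001', 'Game.002'] as the names to skip (any extension starting with '00').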
def copy(source, dest):
if os.path.isdir(source):
shutil.copytree(source, dest, ignore = ignore_filter)
else:
shutil.copy(source, dest)
for file in RUNTIME_FILES:
copy(os.path.join(runtime_directory, file),
os.path.join('tmp', 'Data', 'Runtime', file))
for file in EXTENSION_FILES:
copy(os.path.join(extensions_directory, file),
os.path.join('tmp', 'Extensions', file))
for file in TOOL_FILES:
copy(os.path.join(tools_directory, file),
os.path.join('tmp', 'Tools', 'Anaconda', file))
for dir in EXAMPLE_FOLDERS:
copy(os.path.join(examples_directory, dir),
os.path.join('tmp', 'Examples', dir))
anaconda_directory = os.path.join(runtime_directory, 'Anaconda')
for file in ANACONDA_FILES:
copy(os.path.join(anaconda_directory, file),
os.path.join('tmp', 'Data', 'Runtime', 'Anaconda', file))
for file in OTHER_FILES:
copy(file, os.path.join('tmp', file))
# copy license file
if COMMERCIAL:
license_file = 'COMMERCIAL_LICENSE.txt'
else:
license_file = 'DEMO_LICENSE.txt'
copy(license_file, os.path.join('tmp', 'LICENSE.txt'))
VERSION = 'This version was distributed %s\nRevision at %s' % (
time.strftime('%c'), get_hg_rev())
open('./tmp/VERSION.txt', 'wb').write(VERSION)
subprocess.check_call(r'7z a ../dist/anaconda.zip', cwd = './tmp/') | gpl-3.0 | -3,242,928,090,785,773,600 | 30.205357 | 71 | 0.688895 | false |
samuelcolvin/JuliaByExample | deps/build.py | 1 | 8258 | #!/usr/bin/python
"""
This script generates a simple website showing examples and an about page.
The about page is generated from README.md and the examples page is a concatenation
of a description markdown file and the example source files.
The "julia source examples" are taken verbatim from the julia source code
(https://github.com/JuliaLang/julia), hence the script has to clone or pull the entire
julia source to extract the examples.
In theory it would be cool if this were written in Julia too; however, libraries like
urllib, pygments and jinja2 (or their equivalents) are not yet available in Julia.
"""
import os
import sys
import shutil
import markdown2
import re
import codecs
import grablib
from jinja2 import contextfunction, Markup, FileSystemLoader, Environment
from pygments import highlight
import pygments.lexers as pyg_lexers
from pygments.formatters import HtmlFormatter
THIS_PATH = os.path.dirname(os.path.realpath(__file__))
PROJ_ROOT = os.path.realpath(os.path.join(THIS_PATH, os.pardir))
STATIC_PATH = os.path.join(THIS_PATH, 'static')
WWW_PATH = os.path.join(PROJ_ROOT, 'www')
WWW_STATIC_PATH = os.path.join(WWW_PATH, 'static')
ROOT_URL = 'http://www.scolvin.com/juliabyexample'
def _smart_comments(match):
"""
replace markdown style links with html "<a href..."
convert **strong** to html
"""
comment = match.groups()[0]
comment = re.sub('\[(.*?)\]\((.*?)\)', r'<a href="\2" target="_blank">\1</a>', comment)
comment = re.sub('\*\*(.*?)\*\*', r'<strong>\1</strong>', comment)
return '<span class="c">%s</span>' % comment
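# Rough illustration with a made-up comment span: the substitution in code_file() below
# turns
#   <span class="c"># see [the docs](http://example.com) for **more**</span>
# into
#   <span class="c"># see <a href="http://example.com" target="_blank">the docs</a> for <strong>more</strong></span>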
def src_file_copy(context, file_name):
ex_dir = context['example_directory']
file_path = os.path.join(PROJ_ROOT, ex_dir, file_name)
new_path = os.path.join(WWW_PATH, ex_dir)
if not os.path.exists(new_path):
os.makedirs(new_path)
shutil.copyfile(file_path, os.path.join(new_path, file_name))
return ex_dir, file_path
@contextfunction
def code_file(context, file_name, **kwargs):
ex_dir, file_path = src_file_copy(context, file_name)
file_text = codecs.open(file_path, encoding='utf-8').read()
url = '/'.join(s.strip('/') for s in [ex_dir, file_name])
url = url.replace('/./', '/')
download_link = ('<a class="download-link" href="%s" title="download %s" data-toggle="tooltip" '
'data-placement="bottom">'
'<span class="glyphicon glyphicon-cloud-download"></span></a>') % (url, file_name)
# remove hidden sections
regex = re.compile('\n*# *<hide>.*# *</hide>', flags=re.M | re.S)
code = re.sub(regex, '', file_text)
code = code.strip(' \r\n')
lexer = pyg_lexers.get_lexer_for_filename(file_name)
formatter = HtmlFormatter(cssclass='code') # linenos=True,
git_url = '%s/%s/%s' % (context['view_root'], context['example_repo_dir'], file_name)
code = highlight(code, lexer, formatter)
code = re.sub('<span class="c">(.*?)</span>', _smart_comments, code)
response = """<a class="git-link" href="%s" data-toggle="tooltip" data-placement="bottom"
target="_blank" title="go to github"><img src="static/github.png" alt="Github Link"/></a>%s\n%s\n""" % \
(git_url, download_link, code)
return Markup(response)
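# Note on the <hide> handling above (illustrative snippet, not from the repo): a block
# such as
#   # <hide>
#   setup_only_code()
#   # </hide>
# is stripped from the highlighted listing by the regex, while the copy written out by
# src_file_copy() for the download link keeps it.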
@contextfunction
def src_image(context, file_name, **kwargs):
ex_dir, file_path = src_file_copy(context, file_name)
url = '/'.join(s.strip('/') for s in [ex_dir, file_name])
url = url.replace('/./', '/')
return '<img class="source-image" src="%s" alt="%s"/>' % (url, file_name)
@contextfunction
def src_iframe(context, file_name, **kwargs):
ex_dir, file_path = src_file_copy(context, file_name)
url = '/'.join(s.strip('/') for s in [ex_dir, file_name])
url = url.replace('/./', '/')
return '<iframe class="source-iframe" frameborder="0" src="%s">%s</iframe>' % (url, file_name)
class SiteGenerator(object):
ctx = {}
tags = []
def __init__(self, output=None):
if output:
self._output = output
self._env = Environment(loader=FileSystemLoader(THIS_PATH))
self.delete_www()
self.generate_page()
self.generate_statics()
def _repl_tags(self, match):
hno, title = match.groups()
replacements = [(' ', '-'), ('.', '_'), (':', ''), ('&', '')]
tag_ref = title
for f, t in replacements:
tag_ref = tag_ref.replace(f, t)
for c in ['\-', ':', '\.']:
tag_ref = re.sub(r'%s%s+' % (c, c), c[-1], tag_ref)
self.tags.append({'link': '#' + tag_ref, 'name': title})
return '<h%s id="%s">%s<a href="#%s" class="hlink glyphicon glyphicon-link"></a></h%s>' \
% (hno, tag_ref, title, tag_ref, hno)
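# Worked example with a made-up heading: for '<h4>Arrays & Tuples</h4>' the title is
# first mapped to 'Arrays--Tuples' by the replacements, the duplicate-collapsing loop
# reduces it to 'Arrays-Tuples', and the returned heading becomes
# '<h4 id="Arrays-Tuples">Arrays & Tuples<a href="#Arrays-Tuples" class="hlink ...'.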
def generate_page(self):
example_dir = 'src'
self.test_for_missing_files(os.path.join(PROJ_ROOT, example_dir))
template = self._env.get_template('template.jinja')
ex_env = Environment(loader=FileSystemLoader(PROJ_ROOT))
ex_env.globals.update(
code_file=code_file,
src_image=src_image,
src_iframe=src_iframe
)
ex_template = ex_env.get_template('main_page.md')
examples = ex_template.render(example_directory=example_dir,
example_repo_dir='src',
view_root='https://github.com/samuelcolvin/JuliaByExample/blob/master')
examples = markdown2.markdown(examples)
examples = re.sub('<h([1-6])>(.*?)</h[1-6]>', self._repl_tags, examples, 0, re.I)
page_text = template.render(examples=examples, tags=self.tags, **self.ctx)
file_name = 'index.html'
page_path = os.path.join(WWW_PATH, file_name)
with open(page_path, 'w', encoding="UTF8") as f:
f.write(page_text)
self._output('generated %s' % file_name)
def test_for_missing_files(self, example_dir):
with open(os.path.join(PROJ_ROOT, 'main_page.md'), encoding="UTF8") as f:
desc_text = f.read()
quoted_files = set(re.findall("{{ *code_file\( *'(.*?)' *\) *}}", desc_text))
actual_files = set([fn for fn in os.listdir(example_dir) if
fn.endswith('.jl') and fn not in ['addcomments.jl', 'test_examples.jl']])
non_existent = quoted_files.difference(actual_files)
if len(non_existent) > 0:
self._output('*** QUOTED FILES ARE MISSING ***:')
self._output(' ' + ', '.join(non_existent))
unquoted = actual_files.difference(quoted_files)
if len(unquoted) > 0:
self._output('*** JULIA FILES EXIST WHICH ARE UNQUOTED ***:')
self._output(' ' + ', '.join(unquoted))
def generate_statics(self):
if os.path.exists(STATIC_PATH):
shutil.copytree(STATIC_PATH, WWW_STATIC_PATH)
self._output('copied local static files')
down_success = self.download_libraries()
def delete_www(self):
if os.path.exists(WWW_PATH):
shutil.rmtree(WWW_PATH)
self._output('deleting existing site: %s' % WWW_PATH)
os.makedirs(WWW_PATH)
def generate_pyg_css(self):
pyg_css = HtmlFormatter().get_style_defs('.code')
file_path = os.path.join(WWW_STATIC_PATH, 'pygments.css')
with open(file_path, 'w', encoding="UTF8") as f:
f.write(pyg_css)
def download_libraries(self):
grablib_path = os.path.join(THIS_PATH, 'grablib.json')
# libs_root has to be set manually so build works with different working directories
libs_root = os.path.join(PROJ_ROOT, 'www/static/external')
grablib.grab(grablib_path, libs_root=libs_root)
def _output(self, msg):
print(msg)
def list_examples_by_size(examples_dir='src'):
path = os.path.join(PROJ_ROOT, examples_dir)
files = [(os.path.getsize(os.path.join(path, fn)), fn) for fn in os.listdir(path)]
files.sort()
print(''.join(['\n\n#### %s\n\n{{ code_file(\'%s\') }} ' % (fn, fn) for _, fn in files if fn.endswith('.jl')]))
if __name__ == '__main__':
if 'j_example_list' in sys.argv:
list_examples_by_size()
else:
SiteGenerator()
print('Successfully generated site at %s' % WWW_PATH)
| mit | -5,121,403,770,089,996,000 | 39.480392 | 115 | 0.603657 | false |
teamfx/openjfx-9-dev-rt | modules/javafx.web/src/main/native/Tools/Scripts/webkitpy/layout_tests/views/printing.py | 1 | 17814 | # Copyright (C) 2010, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Package that handles non-debug, non-file output for run-webkit-tests."""
import math
import optparse
from webkitpy.tool import grammar
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models.test_expectations import TestExpectations, TestExpectationParser
from webkitpy.layout_tests.views.metered_stream import MeteredStream
NUM_SLOW_TESTS_TO_LOG = 10
def print_options():
return [
optparse.make_option('-q', '--quiet', action='store_true', default=False,
help='run quietly (errors, warnings, and progress only)'),
optparse.make_option('-v', '--verbose', action='store_true', default=False,
help='print a summarized result for every test (one line per test)'),
optparse.make_option('--details', action='store_true', default=False,
help='print detailed results for every test'),
optparse.make_option('--debug-rwt-logging', action='store_true', default=False,
help='print timestamps and debug information for run-webkit-tests itself'),
]
class Printer(object):
"""Class handling all non-debug-logging printing done by run-webkit-tests."""
def __init__(self, port, options, regular_output, logger=None):
self.num_started = 0
self.num_tests = 0
self._port = port
self._options = options
self._meter = MeteredStream(regular_output, options.debug_rwt_logging, logger=logger,
number_of_columns=self._port.host.platform.terminal_width())
self._running_tests = []
self._completed_tests = []
def cleanup(self):
self._meter.cleanup()
def __del__(self):
self.cleanup()
def print_config(self, results_directory):
self._print_default("Using port '%s'" % self._port.name())
self._print_default("Test configuration: %s" % self._port.test_configuration())
self._print_default("Placing test results in %s" % results_directory)
# FIXME: should these options be in printing_options?
if self._options.new_baseline:
self._print_default("Placing new baselines in %s" % self._port.baseline_path())
fs = self._port.host.filesystem
fallback_path = [fs.split(x)[1] for x in self._port.baseline_search_path()]
self._print_default("Baseline search path: %s -> generic" % " -> ".join(fallback_path))
self._print_default("Using %s build" % self._options.configuration)
if self._options.pixel_tests:
self._print_default("Pixel tests enabled")
else:
self._print_default("Pixel tests disabled")
self._print_default("Regular timeout: %s, slow test timeout: %s" %
(self._options.time_out_ms, self._options.slow_time_out_ms))
self._print_default('Command line: ' + ' '.join(self._port.driver_cmd_line()))
self._print_default('')
def print_found(self, num_all_test_files, num_to_run, repeat_each, iterations):
found_str = 'Found %s; running %d' % (grammar.pluralize(num_all_test_files, "test"), num_to_run)
if repeat_each * iterations > 1:
found_str += ' (%s each: --repeat-each=%d --iterations=%d)' % (grammar.pluralize(repeat_each * iterations, "time"), repeat_each, iterations)
found_str += ', skipping %d' % (num_all_test_files - num_to_run)
self._print_default(found_str + '.')
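# Example of the resulting line (illustrative numbers): with 100 tests found, 90 to
# run, --repeat-each=2 and --iterations=1 this prints roughly
# "Found 100 tests; running 90 (2 times each: --repeat-each=2 --iterations=1), skipping 10."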
def print_expected(self, run_results, tests_with_result_type_callback):
self._print_expected_results_of_type(run_results, test_expectations.PASS, "passes", tests_with_result_type_callback)
self._print_expected_results_of_type(run_results, test_expectations.FAIL, "failures", tests_with_result_type_callback)
self._print_expected_results_of_type(run_results, test_expectations.FLAKY, "flaky", tests_with_result_type_callback)
self._print_debug('')
def print_workers_and_shards(self, num_workers, num_shards):
driver_name = self._port.driver_name()
if num_workers == 1:
self._print_default("Running 1 %s." % driver_name)
self._print_debug("(%s)." % grammar.pluralize(num_shards, "shard"))
else:
self._print_default("Running %s in parallel." % (grammar.pluralize(num_workers, driver_name)))
self._print_debug("(%d shards)." % num_shards)
self._print_default('')
def _print_expected_results_of_type(self, run_results, result_type, result_type_str, tests_with_result_type_callback):
tests = tests_with_result_type_callback(result_type)
now = run_results.tests_by_timeline[test_expectations.NOW]
wontfix = run_results.tests_by_timeline[test_expectations.WONTFIX]
# We use a fancy format string in order to print the data out in a
# nicely-aligned table.
fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)"
% (self._num_digits(now), self._num_digits(wontfix)))
self._print_debug(fmtstr % (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))
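# Illustration with made-up counts: if the NOW set has 1234 tests and WONTFIX has 56,
# _num_digits() yields 4 and 2, so fmtstr becomes
# "Expect: %5d %-8s (%4d now, %2d wontfix)" and the three print_expected() calls
# line up in columns.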
def _num_digits(self, num):
ndigits = 1
if len(num):
ndigits = int(math.log10(len(num))) + 1
return ndigits
def print_results(self, run_time, run_results, summarized_results):
self._print_timing_statistics(run_time, run_results)
self._print_one_line_summary(run_results.total - run_results.expected_skips,
run_results.expected - run_results.expected_skips,
run_results.unexpected)
def _print_timing_statistics(self, total_time, run_results):
self._print_debug("Test timing:")
self._print_debug(" %6.2f total testing time" % total_time)
self._print_debug("")
self._print_worker_statistics(run_results, int(self._options.child_processes))
self._print_aggregate_test_statistics(run_results)
self._print_individual_test_times(run_results)
self._print_directory_timings(run_results)
def _print_worker_statistics(self, run_results, num_workers):
self._print_debug("Thread timing:")
stats = {}
cuml_time = 0
for result in run_results.results_by_name.values():
stats.setdefault(result.worker_name, {'num_tests': 0, 'total_time': 0})
stats[result.worker_name]['num_tests'] += 1
stats[result.worker_name]['total_time'] += result.total_run_time
cuml_time += result.total_run_time
for worker_name in stats:
self._print_debug(" %10s: %5d tests, %6.2f secs" % (worker_name, stats[worker_name]['num_tests'], stats[worker_name]['total_time']))
self._print_debug(" %6.2f cumulative, %6.2f optimal" % (cuml_time, cuml_time / num_workers))
self._print_debug("")
def _print_aggregate_test_statistics(self, run_results):
times_for_dump_render_tree = [result.test_run_time for result in run_results.results_by_name.values()]
self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):", times_for_dump_render_tree)
def _print_individual_test_times(self, run_results):
# Reverse-sort by the time spent in DumpRenderTree.
individual_test_timings = sorted(run_results.results_by_name.values(), key=lambda result: result.test_run_time, reverse=True)
num_printed = 0
slow_tests = []
timeout_or_crash_tests = []
unexpected_slow_tests = []
for test_tuple in individual_test_timings:
test_name = test_tuple.test_name
is_timeout_crash_or_slow = False
if test_name in run_results.slow_tests:
is_timeout_crash_or_slow = True
slow_tests.append(test_tuple)
if test_name in run_results.failures_by_name:
result = run_results.results_by_name[test_name].type
if (result == test_expectations.TIMEOUT or
result == test_expectations.CRASH):
is_timeout_crash_or_slow = True
timeout_or_crash_tests.append(test_tuple)
if (not is_timeout_crash_or_slow and num_printed < NUM_SLOW_TESTS_TO_LOG):
num_printed = num_printed + 1
unexpected_slow_tests.append(test_tuple)
self._print_debug("")
self._print_test_list_timing("%s slowest tests that are not marked as SLOW and did not timeout/crash:" %
NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
self._print_debug("")
self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
self._print_debug("")
self._print_test_list_timing("Tests that timed out or crashed:", timeout_or_crash_tests)
self._print_debug("")
def _print_test_list_timing(self, title, test_list):
self._print_debug(title)
for test_tuple in test_list:
test_run_time = round(test_tuple.test_run_time, 1)
self._print_debug(" %s took %s seconds" % (test_tuple.test_name, test_run_time))
def _print_directory_timings(self, run_results):
stats = {}
for result in run_results.results_by_name.values():
stats.setdefault(result.shard_name, {'num_tests': 0, 'total_time': 0})
stats[result.shard_name]['num_tests'] += 1
stats[result.shard_name]['total_time'] += result.total_run_time
timings = []
for directory in stats:
timings.append((directory, round(stats[directory]['total_time'], 1), stats[directory]['num_tests']))
timings.sort()
self._print_debug("Time to process slowest subdirectories:")
min_seconds_to_print = 10
for timing in timings:
if timing[0] > min_seconds_to_print:
self._print_debug(" %s took %s seconds to run %s tests." % timing)
self._print_debug("")
def _print_statistics_for_test_timings(self, title, timings):
self._print_debug(title)
timings.sort()
num_tests = len(timings)
if not num_tests:
return
percentile90 = timings[int(.9 * num_tests)]
percentile99 = timings[int(.99 * num_tests)]
if num_tests % 2 == 1:
median = timings[(num_tests - 1) / 2]
else:
lower = timings[num_tests / 2 - 1]
upper = timings[num_tests / 2]
median = (float(lower + upper)) / 2
mean = sum(timings) / num_tests
sum_of_deviations = 0
for timing in timings:
    sum_of_deviations += math.pow(timing - mean, 2)  # accumulate squared deviations
std_deviation = math.sqrt(sum_of_deviations / num_tests)
self._print_debug(" Median: %6.3f" % median)
self._print_debug(" Mean: %6.3f" % mean)
self._print_debug(" 90th percentile: %6.3f" % percentile90)
self._print_debug(" 99th percentile: %6.3f" % percentile99)
self._print_debug(" Standard dev: %6.3f" % std_deviation)
self._print_debug("")
def _print_one_line_summary(self, total, expected, unexpected):
incomplete = total - expected - unexpected
incomplete_str = ''
if incomplete:
self._print_default("")
incomplete_str = " (%d didn't run)" % incomplete
if self._options.verbose or self._options.debug_rwt_logging or unexpected:
self.writeln("")
summary = ''
if unexpected == 0:
if expected == total:
if expected > 1:
summary = "All %d tests ran as expected." % expected
else:
summary = "The test ran as expected."
else:
summary = "%s ran as expected%s." % (grammar.pluralize(expected, "test"), incomplete_str)
else:
summary = "%s ran as expected, %d didn't%s:" % (grammar.pluralize(expected, "test"), unexpected, incomplete_str)
self._print_quiet(summary)
self._print_quiet("")
def _test_status_line(self, test_name, suffix, truncate=True):
format_string = '[%d/%d] %s%s'
status_line = format_string % (self.num_started, self.num_tests, test_name, suffix)
if truncate and len(status_line) > self._meter.number_of_columns():
overflow_columns = len(status_line) - self._meter.number_of_columns()
ellipsis = '...'
if len(test_name) < overflow_columns + len(ellipsis) + 2:
# We don't have enough space even if we elide, just show the test filename.
fs = self._port.host.filesystem
test_name = fs.split(test_name)[1]
else:
new_length = len(test_name) - overflow_columns - len(ellipsis)
prefix = int(new_length / 2)
test_name = test_name[:prefix] + ellipsis + test_name[-(new_length - prefix):]
return format_string % (self.num_started, self.num_tests, test_name, suffix)
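# The elision above keeps the head and tail of an over-long name, so a long path may
# be rendered roughly as "[12/500] fast/forms/long-na...-expected.html" to fit the
# terminal; the exact split depends on the column count (illustrative example).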
def print_started_test(self, test_name):
self.num_started += 1
self._running_tests.append(test_name)
if len(self._running_tests) > 1:
suffix = ' (+%d)' % (len(self._running_tests) - 1)
else:
suffix = ''
if self._options.verbose:
write = self._meter.write_update
else:
write = self._meter.write_throttled_update
write(self._test_status_line(test_name, suffix))
def print_finished_test(self, result, expected, exp_str, got_str):
test_name = result.test_name
result_message = self._result_message(result.type, result.failures, expected, self._options.verbose)
if self._options.details:
self._print_test_trace(result, exp_str, got_str)
elif (self._options.verbose and not self._options.debug_rwt_logging) or not expected:
self.writeln(self._test_status_line(test_name, result_message, truncate=False))
elif self.num_started == self.num_tests:
self._meter.write_update('')
else:
if test_name == self._running_tests[0]:
self._completed_tests.insert(0, [test_name, result_message])
else:
self._completed_tests.append([test_name, result_message])
for test_name, result_message in self._completed_tests:
self._meter.write_throttled_update(self._test_status_line(test_name, result_message, truncate=False))
self._completed_tests = []
self._running_tests.remove(test_name)
def _result_message(self, result_type, failures, expected, verbose):
exp_string = ' unexpectedly' if not expected else ''
if result_type == test_expectations.PASS:
return ' passed%s' % exp_string
else:
return ' failed%s (%s)' % (exp_string, ', '.join(failure.message() for failure in failures))
def _print_test_trace(self, result, exp_str, got_str):
test_name = result.test_name
self._print_default(self._test_status_line(test_name, ''))
for extension in ('.txt', '.png', '.wav', '.webarchive'):
self._print_baseline(test_name, extension)
self._print_default(' exp: %s' % exp_str)
self._print_default(' got: %s' % got_str)
self._print_default(' took: %-.3f' % result.test_run_time)
self._print_default('')
def _print_baseline(self, test_name, extension):
baseline = self._port.expected_filename(test_name, extension)
if self._port._filesystem.exists(baseline):
relpath = self._port.relative_test_filename(baseline)
else:
relpath = '<none>'
self._print_default(' %s: %s' % (extension[1:], relpath))
def _print_quiet(self, msg):
self.writeln(msg)
def _print_default(self, msg):
if not self._options.quiet:
self.writeln(msg)
def _print_debug(self, msg):
if self._options.debug_rwt_logging:
self.writeln(msg)
def write_update(self, msg):
self._meter.write_update(msg)
def writeln(self, msg):
self._meter.writeln(msg)
def flush(self):
self._meter.flush()
| gpl-2.0 | -4,109,730,330,662,061,000 | 45.031008 | 152 | 0.616145 | false |
the-it/WS_THEbotIT | tools/test_template_handler.py | 1 | 12193 | from unittest import TestCase
from tools.template_handler import TemplateHandler
test_title = "vorlage"
test_title_sperr = "Sperrsatz"
test_title_test = "testtitle"
test_string_argument_1 = "1=test1"
test_string_argument_1_no_key = "test1"
test_string_argument_2 = "2=test2"
test_string_argument_3 = "test3"
test_string_argument_4 = "4=test4"
test_string_argument_5 = "5=test5"
test_string_12_complex = "{{" + test_title + "\n|" + test_string_argument_1 + "\n|" + test_string_argument_2 + "\n}}"
test_string_12_simple = "{{" + test_title + "|" + test_string_argument_1 + "|" + test_string_argument_2 + "}}"
test_dict_argument_1 = {"key": "1", "value": "test1"}
test_dict_argument_1_no_key = {"key": None, "value": "test1"}
test_dict_argument_2 = {"key": "2", "value": "test2"}
test_dict_argument_3 = {"key": None, "value": "test3"}
test_dict_argument_4 = {"key": "4", "value": "test4"}
test_dict_argument_5 = {"key": "5", "value": "test5"}
test_list_12 = [test_dict_argument_1, test_dict_argument_2]
class TestTemplateHandler(TestCase):
def test_template_from_page(self):
handler = TemplateHandler(test_string_12_complex)
self.assertEqual(test_list_12, handler.get_parameterlist())
def test_get_parameter(self):
handler = TemplateHandler(test_string_12_complex)
self.assertEqual(test_dict_argument_1, handler.get_parameter("1"))
self.assertEqual(test_dict_argument_2, handler.get_parameter("2"))
def test_get_str(self):
handler = TemplateHandler()
handler.set_title(test_title)
handler.update_parameters(test_list_12)
self.assertEqual(test_string_12_simple, handler.get_str(str_complex=False))
self.assertEqual(test_string_12_complex, handler.get_str(str_complex=True))
def test_without_key(self):
test_string_12_no_key = "{{" + test_title + "|" \
+ test_string_argument_1_no_key + "|" \
+ test_string_argument_2 + "}}"
test_list_12_no_key = [test_dict_argument_1_no_key, test_dict_argument_2]
handler = TemplateHandler(test_string_12_no_key)
self.assertEqual(test_list_12_no_key, handler.get_parameterlist())
def test_update_parameters(self):
test_string_345_simple = "{{" + test_title + "|" \
+ test_string_argument_3 + "|" \
+ test_string_argument_4 + "|" \
+ test_string_argument_5 + "}}"
test_list_345 = [test_dict_argument_3, test_dict_argument_4, test_dict_argument_5]
handler = TemplateHandler(test_string_12_simple)
self.assertEqual(test_dict_argument_1, handler.get_parameter("1"))
self.assertEqual(test_dict_argument_2, handler.get_parameter("2"))
handler.update_parameters(test_list_345)
self.assertEqual(test_string_345_simple, handler.get_str(str_complex=False))
def test_template_in_template(self):
test_string_argument_template = "{{otherTemplate|other_argument}}"
test_string_12_template = "{{" + test_title + "|" \
+ test_string_argument_template + "|" \
+ test_string_argument_2 + "}}"
test_dict_template_no_key = {"key": None, "value": "{{otherTemplate|other_argument}}"}
test_list_template_no_key = [test_dict_template_no_key, test_dict_argument_2]
handler = TemplateHandler(test_string_12_template)
self.assertListEqual(test_list_template_no_key, handler.get_parameterlist())
del handler
test_string_argument_template2 = "{{Kapitaelchen|Test}}"
test_string_template_2 = "{{" + test_title_sperr + "|" + test_string_argument_template2 + "}}"
test_dict_template_2 = {"key": None, "value": "{{Kapitaelchen|Test}}"}
test_list_template_2 = [test_dict_template_2]
handler = TemplateHandler(test_string_template_2)
self.assertListEqual(test_list_template_2, handler.get_parameterlist())
del handler
test_string_argument_1_template = "1={{otherTemplate|other_argument}}"
test_string_12_template_no_key = "{{" + test_title + "|" \
+ test_string_argument_1_template + "|" \
+ test_string_argument_2 + "}}"
test_dict_template = {"key": "1", "value": "{{otherTemplate|other_argument}}"}
test_list_template = [test_dict_template, test_dict_argument_2]
handler = TemplateHandler(test_string_12_template_no_key)
self.assertListEqual(test_list_template, handler.get_parameterlist())
def test_set_title(self):
test_string_12_test_title = "{{" + test_title_test + "|" \
+ test_string_argument_1 + "|" \
+ test_string_argument_2 + "}}"
handler = TemplateHandler(test_string_12_simple)
handler.set_title(test_title_test)
self.assertEqual(test_string_12_test_title, handler.get_str(str_complex=False))
def test_link_with_text(self):
test_string_argument_2_link = "2 = [[link|text for link]] more"
test_string_12_link = "{{" + test_title + "|" \
+ test_string_argument_1_no_key + "|" \
+ test_string_argument_2_link + "}}"
test_dict_link = {"key": "2", "value": "[[link|text for link]] more"}
test_list_link = [test_dict_argument_1_no_key, test_dict_link]
handler = TemplateHandler(test_string_12_link)
self.assertEqual(test_list_link, handler.get_parameterlist())
del handler
test_string_argument_link = "[[link|text for link]] more"
test_string_12_link_no_key = "{{" + test_title + "|" \
+ test_string_argument_1_no_key + "|" \
+ test_string_argument_link + "}}"
test_dict_link_no_key = {"key": None, "value": "[[link|text for link]] more"}
test_list_link_no_key = [test_dict_argument_1_no_key, test_dict_link_no_key]
handler = TemplateHandler(test_string_12_link_no_key)
self.assertEqual(test_list_link_no_key, handler.get_parameterlist())
def test_second_equal(self):
test_string_argument_second_equal = "BILD=Der Todesgang des armenischen Volkes.pdf{{!}}page=276"
test_string_second_equal = "{{" + test_title_test + "|" + \
test_string_argument_1 + "|" + \
test_string_argument_second_equal + "}}"
test_dict_second_equal = {"key": "BILD", "value": "Der Todesgang des armenischen Volkes.pdf{{!}}page=276"}
test_list_second_equal = [test_dict_argument_1, test_dict_second_equal]
handler = TemplateHandler(test_string_second_equal)
self.assertEqual(test_list_second_equal, handler.get_parameterlist())
def test_bug_no_arguments(self):
test_string = "{{just_this}}"
handler = TemplateHandler(test_string)
self.assertListEqual([], handler.get_parameterlist())
def test_bug_authorlist(self):
test_string_argument_bug = "STERBEDATUM = 2. Januar < ref name = \"adp\" / > oder 31. Januar " \
"< ref > 49. Jahres - Bericht d.Schles.Ges.für vaterländische Cultur, S. 317, " \
"Nekrolog {{GBS|hP1DAAAAIAAJ|PA317}} < / ref > 1871"
test_string_bug = "{{" + test_title_test + "|" \
+ test_string_argument_1 + "|" \
+ test_string_argument_bug + "}}"
test_dict_bug = {"key": "STERBEDATUM",
"value": "2. Januar < ref name = \"adp\" / > oder 31. Januar "
"< ref > 49. Jahres - Bericht d.Schles.Ges.für vaterländische Cultur, S. 317, "
"Nekrolog {{GBS|hP1DAAAAIAAJ|PA317}} < / ref > 1871"}
test_list_bug = [test_dict_argument_1, test_dict_bug]
handler = TemplateHandler(test_string_bug)
real_dict = handler.get_parameterlist()
self.assertEqual(test_list_bug, real_dict)
test_string_argument_bug = "GEBURTSDATUM=1783 < ref name = \"EB\" > Encyclopaedia Britannica. " \
"11. Auflage(1911), Bd. 1, S.[[:en:Page:EB1911 - Volume 01. djvu / 792 | 748]] " \
"{{an | englisch, im Artikel}} < / ref >"
test_string_bug = "{{" + test_title_test + "|" \
+ test_string_argument_1 + "|" \
+ test_string_argument_bug + "}}"
test_dict_bug = {"key": "GEBURTSDATUM",
"value": "1783 < ref name = \"EB\" > Encyclopaedia Britannica. "
"11. Auflage(1911), Bd. 1, S.[[:en:Page:EB1911 - Volume 01. djvu / 792 | 748]] "
"{{an | englisch, im Artikel}} < / ref >"}
test_list_bug = [test_dict_argument_1, test_dict_bug]
handler = TemplateHandler(test_string_bug)
real_dict = handler.get_parameterlist()
self.assertEqual(test_list_bug, real_dict)
test_string_argument_bug = "GEBURTSORT=Klein Flottbek (heute zu [[Hamburg]])" \
"|STERBEDATUM=28. Oktober 1929|STERBEORT=[[Rom]]"
test_string_bug = "{{" + test_title_test + "|" + test_string_argument_1 + "|" + test_string_argument_bug + "}}"
test_dict_bug_1 = {"key": "GEBURTSORT", "value": "Klein Flottbek (heute zu [[Hamburg]])"}
test_dict_bug_2 = {"key": "STERBEDATUM", "value": "28. Oktober 1929"}
test_dict_bug_3 = {"key": "STERBEORT", "value": "[[Rom]]"}
test_list_bug = [test_dict_argument_1, test_dict_bug_1, test_dict_bug_2, test_dict_bug_3]
handler = TemplateHandler(test_string_bug)
real_dict = handler.get_parameterlist()
self.assertEqual(test_list_bug, real_dict)
test_string_argument_bug = "ALTERNATIVNAMEN = Carl Biedermann; Friedrich Karl Biedermann; " \
"Karl Friedrich 4[Pseudonym]" \
"|SONSTIGES=[http://gso.gbv.de/DB=1.28/REL?PPN=004072189&RELTYPE=TT " \
"Martin Opitz im VD 17]"
test_string_bug = "{{" + test_title_test + "|" + test_string_argument_1 + "|" + test_string_argument_bug + "}}"
test_dict_bug_1 = {"key": "ALTERNATIVNAMEN",
"value": "Carl Biedermann; Friedrich Karl Biedermann; Karl Friedrich 4[Pseudonym]"}
test_dict_bug_2 = {"key": "SONSTIGES",
"value": "[http://gso.gbv.de/DB=1.28/REL?PPN=004072189&RELTYPE=TT Martin Opitz im VD 17]"}
test_list_bug = [test_dict_argument_1, test_dict_bug_1, test_dict_bug_2]
handler = TemplateHandler(test_string_bug)
real_dict = handler.get_parameterlist()
self.assertEqual(test_list_bug, real_dict)
test_string_argument_bug = "SONSTIGES=Pächter der [[w:Harste|Domäne Harste]], " \
"Vater von [[w:Karl Henrici|Karl Henrici]]<ref>Zeitschrift des Vereins für " \
"Hamburgische Geschichte."" Band 42. 1953, S. 135 " \
"[http://books.google.de/books?id=1XISAAAAIAAJ Google]</ref>"
test_string_bug = "{{" + test_title_test + "|" + test_string_argument_1 + "|" + test_string_argument_bug + "}}"
test_dict_bug = {"key": "SONSTIGES",
"value": "Pächter der [[w:Harste|Domäne Harste]], "
"Vater von [[w:Karl Henrici|Karl Henrici]]<ref>""Zeitschrift des Vereins für "
"Hamburgische Geschichte."" Band 42. 1953, S. 135 "
"[http://books.google.de/books?id=1XISAAAAIAAJ Google]</ref>"}
test_list_bug = [test_dict_argument_1, test_dict_bug]
handler = TemplateHandler(test_string_bug)
real_dict = handler.get_parameterlist()
self.assertEqual(test_list_bug, real_dict)
| mit | -4,280,143,740,737,833,500 | 57.855072 | 119 | 0.564557 | false |
nicfit/eyed3 | eyed3/plugins/xep_118.py | 1 | 1600 | from pathlib import Path
from xml.sax.saxutils import escape
from eyed3.plugins import LoaderPlugin
from eyed3.utils.console import printMsg
class Xep118Plugin(LoaderPlugin):
NAMES = ["xep-118"]
SUMMARY = "Outputs all tags in XEP-118 XML format. "\
"(see: http://xmpp.org/extensions/xep-0118.html)"
def __init__(self, arg_parser):
super().__init__(arg_parser, cache_files=True, track_images=False)
g = self.arg_group
g.add_argument("--no-pretty-print", action="store_true",
help="Output without new lines or indentation.")
def handleFile(self, f, *args, **kwargs):
super().handleFile(f)
if self.audio_file and self.audio_file.tag:
xml = self.getXML(self.audio_file)
printMsg(xml)
def getXML(self, audio_file):
tag = audio_file.tag
pprint = not self.args.no_pretty_print
nl = "\n" if pprint else ""
indent = (" " * 2) if pprint else ""
xml = f"<tune xmlns='http://jabber.org/protocol/tune'>{nl}"
if tag.artist:
xml += f"{indent}<artist>{escape(tag.artist)}</artist>{nl}"
if tag.title:
xml += f"{indent}<title>{escape(tag.title)}</title>{nl}"
if tag.album:
xml += f"{indent}<source>{escape(tag.album)}</source>{nl}"
xml += f"{indent}<track>file://{escape(str(Path(audio_file.path).absolute()))}</track>{nl}"
if audio_file.info:
xml += f"{indent}<length>{audio_file.info.time_secs:.2f}</length>{nl}"
xml += "</tune>"
return xml
| gpl-2.0 | 5,556,477,815,858,504,000 | 34.555556 | 99 | 0.58125 | false |
rifqifatih/pokebot-slack | pgoapi/exceptions.py | 1 | 1361 | """
pgoapi - Pokemon Go API
Copyright (c) 2016 tjado <https://github.com/tejado>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
Author: tjado <https://github.com/tejado>
"""
class AuthException(Exception):
pass
class NotLoggedInException(Exception):
pass
class ServerBusyOrOfflineException(Exception):
pass
class PleaseInstallProtobufVersion3(Exception):
pass | mit | 5,582,415,991,818,808,000 | 36.833333 | 78 | 0.790595 | false |
CospanDesign/verilog-visualizer | verilogviz/view/matplot_lib_widget.py | 1 | 1522 | from PyQt4.Qt import *
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from matplotlib.backends import qt4_compat
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg
from matplotlib.figure import Figure
#from matplotlib.pyplot import figure
import networkx as nx
class MatplotLibWidget(FigureCanvas):
def __init__(self, parent = None):
fig = Figure()
self.sp = fig.add_subplot(111)
FigureCanvas.__init__(self, fig)
self.sp.hold(False)
#layout = QVBoxLayout()
#self.figure = figure()
'''
self.canvas = FigureCanvas(self.figure)
self.toolbar = NavigationToolbar2QTAgg(self.canvas, self)
'''
#layout.addWidget(self.toolbar)
#layout.addWidget(self.canvas)
#self.setLayout(layout)
'''
fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = fig.add_subplot(111)
# We want the axes cleared every time plot() is called
self.axes.hold(False)
self.compute_initial_figure()
FigureCanvas.__init__(self, fig)
'''
self.setParent(parent)
self.setSizePolicy( QSizePolicy.Expanding,
QSizePolicy.Expanding)
self.updateGeometry()
def draw_graph(self, graph):
value = nx.draw(graph, ax=self.sp)
self.sp.draw(value)
def compute_initial_figure(self):
pass
| gpl-2.0 | -3,476,196,191,035,770,000 | 23.15873 | 80 | 0.634691 | false |
cghr/cghr-chef-repository | cookbooks/trac/files/default/plugins-stock/revision_links.py | 1 | 2160 | """Sample Wiki syntax extension plugin."""
from genshi.builder import tag
from trac.core import *
from trac.util.text import shorten_line
from trac.versioncontrol.api import NoSuchChangeset, RepositoryManager
from trac.versioncontrol.web_ui import ChangesetModule
from trac.wiki.api import IWikiSyntaxProvider
revision = "$Rev: 11490 $"
url = "$URL: https://svn.edgewall.org/repos/trac/tags/trac-1.0.1/sample-plugins/revision_links.py $"
class RevisionLinks(Component):
"""Adds a few more ways to refer to changesets."""
implements(IWikiSyntaxProvider)
KEYWORDS = ['[Rr]ev(?:ision)?', '[Cc]hangeset']
# IWikiSyntaxProvider methods
def get_wiki_syntax(self):
def revlink(f, match, fullmatch):
elts = match.split()
rev = elts[1] # ignore keyword
reponame = ''
if len(elts) > 2: # reponame specified
reponame = elts[-1]
return self._format_revision_link(f, 'revision', reponame, rev, rev,
fullmatch)
yield (r"!?(?:%s)\s+%s(?:\s+in\s+\w+)?" %
("|".join(self.KEYWORDS), ChangesetModule.CHANGESET_ID), revlink)
def get_link_resolvers(self):
def resolverev(f, ns, rev, label, fullmatch):
return self._format_revision_link(f, ns, '', rev, label, fullmatch)
yield ('revision', resolverev)
def _format_revision_link(self, formatter, ns, reponame, rev, label,
fullmatch=None):
rev, params, fragment = formatter.split_link(rev)
try:
repos = RepositoryManager(self.env).get_repository(reponame)
if repos:
changeset = repos.get_changeset(rev)
return tag.a(label, class_="changeset",
title=shorten_line(changeset.message),
href=(formatter.href.changeset(rev) +
params + fragment))
except NoSuchChangeset:
pass
return tag.a(label, class_="missing changeset", rel="nofollow",
href=formatter.href.changeset(rev))
| apache-2.0 | 37,705,845,417,431,660 | 37.571429 | 100 | 0.586574 | false |
nifrob/pythonft | zte-wifi.py | 1 | 8437 | # -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class ztewifi(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def id_wait(self, wd, item_id):
wait = WebDriverWait(wd, 10)
element = wait.until(EC.element_to_be_clickable((By.ID, item_id)))
wd.find_element_by_id(item_id).click()
def test_ztewifi(self):
success = True
wd = self.wd
wait = WebDriverWait(wd, 10)
wd.get("http://192.168.0.1/")
self.id_wait(wd, "Frm_Username")
wd.find_element_by_id("Frm_Username").clear()
wd.find_element_by_id("Frm_Username").send_keys("admin")
self.id_wait(wd, "Frm_Password")
wd.find_element_by_id("Frm_Password").clear()
wd.find_element_by_id("Frm_Password").send_keys("admin1")
self.id_wait(wd, "LoginId")
time.sleep(5)
self.id_wait(wd, 'mmLocalnet')
self.id_wait(wd, 'smLocalWLAN')
self.id_wait(wd, 'WlanBasicAdConfBar')
# if not wd.find_element_by_xpath("//select[@id='UI_Channel:0']//option[4]").is_selected():
# wd.find_element_by_xpath("//select[@id='UI_Channel:0']//option[4]").click()
# if not wd.find_element_by_xpath("//select[@id='UI_Standard:0']//option[3]").is_selected():
# wd.find_element_by_xpath("//select[@id='UI_Standard:0']//option[3]").click()
self.id_wait(wd, 'Btn_apply_WlanBasicAdConf:0')
self.id_wait(wd, 'Btn_cancel_WlanBasicAdConf:0')
self.id_wait(wd, 'instName_WlanBasicAdConf:1')
## if not wd.find_element_by_xpath("//select[@id='UI_Channel:1']//option[6]").is_selected():
# wd.find_element_by_xpath("//select[@id='UI_Channel:1']//option[6]").click()
# if not wd.find_element_by_xpath("//select[@id='UI_Standard:1']//option[3]").is_selected():
# wd.find_element_by_xpath("//select[@id='UI_Standard:1']//option[3]").click()
self.id_wait(wd, "Btn_apply_WlanBasicAdConf:1")
self.id_wait(wd, "Btn_cancel_WlanBasicAdConf:1")
self.id_wait(wd, "WLANSSIDConfBar")
element = wait.until(EC.element_to_be_clickable((By.ID, 'ESSID:0')))
self.id_wait(wd, "ESSID:0")
wd.find_element_by_id("ESSID:0").clear()
wd.find_element_by_id("ESSID:0").send_keys("Test_1")
if not wd.find_element_by_id("ESSIDHideEnable1:0").is_selected():
self.id_wait(wd, "ESSIDHideEnable1:0")
self.id_wait(wd, "KeyPassphrase:0")
wd.find_element_by_id("KeyPassphrase:0").clear()
wd.find_element_by_id("KeyPassphrase:0").send_keys("1234567890")
self.id_wait(wd, "Btn_apply_WLANSSIDConf:0")
self.id_wait(wd, "Btn_apply_WLANSSIDConf:0")
time.sleep(2)
self.id_wait(wd, 'instName_WLANSSIDConf:1')
self.id_wait(wd, 'ESSID:1')
wd.find_element_by_id("ESSID:1").clear()
wd.find_element_by_id("ESSID:1").send_keys("Test_2")
if not wd.find_element_by_id("ESSIDHideEnable1:1").is_selected():
self.id_wait(wd, "ESSIDHideEnable1:1")
self.id_wait(wd, "KeyPassphrase:1")
wd.find_element_by_id("KeyPassphrase:1").clear()
wd.find_element_by_id("KeyPassphrase:1").send_keys("1234567890")
self.id_wait(wd, "MaxUserNum:1")
wd.find_element_by_id("MaxUserNum:1").clear()
wd.find_element_by_id("MaxUserNum:1").send_keys("2")
self.id_wait(wd, "Btn_apply_WLANSSIDConf:1")
time.sleep(2)
self.id_wait(wd, "instName_WLANSSIDConf:2")
element = wait.until(EC.element_to_be_clickable((By.ID, 'ESSID:2')))
self.id_wait(wd, "ESSID:2")
if not wd.find_element_by_id("Enable1:1").is_selected():
self.id_wait(wd, "Enable1:1")
if not wd.find_element_by_id("Enable1:2").is_selected():
self.id_wait(wd, "Enable1:2")
element = wait.until(EC.element_to_be_clickable((By.ID, 'ESSID:2')))
self.id_wait(wd, "ESSID:2")
wd.find_element_by_id("ESSID:2").clear()
wd.find_element_by_id("ESSID:2").send_keys("123")
self.id_wait(wd, "ESSID:2")
wd.find_element_by_id("ESSID:2").clear()
wd.find_element_by_id("ESSID:2").send_keys("Test_3")
self.id_wait(wd, "KeyPassphrase:2")
wd.find_element_by_id("KeyPassphrase:2").clear()
wd.find_element_by_id("KeyPassphrase:2").send_keys("1234567890")
if not wd.find_element_by_id("VapIsolationEnable1:2").is_selected():
self.id_wait(wd, "VapIsolationEnable1:2")
self.id_wait(wd, "Btn_apply_WLANSSIDConf:2")
time.sleep(2)
self.id_wait(wd, "instName_WLANSSIDConf:3")
element = wait.until(EC.element_to_be_clickable((By.ID, 'ESSID:3')))
self.id_wait(wd, "ESSID:3")
wd.find_element_by_id("ESSID:3").clear()
wd.find_element_by_id("ESSID:3").send_keys("Test_4")
if not wd.find_element_by_id("Enable1:3").is_selected():
self.id_wait(wd, "Enable1:3")
self.id_wait(wd, "KeyPassphrase:3")
wd.find_element_by_id("KeyPassphrase:3").clear()
wd.find_element_by_id("KeyPassphrase:3").send_keys("1234567890")
self.id_wait(wd, "Btn_apply_WLANSSIDConf:3")
time.sleep(2)
self.id_wait(wd, "instName_WLANSSIDConf:4")
self.id_wait(wd, "ESSID:4")
wd.find_element_by_id("ESSID:4").clear()
wd.find_element_by_id("ESSID:4").send_keys("Test_5_1")
self.id_wait(wd, "KeyPassphrase:4")
wd.find_element_by_id("KeyPassphrase:4").clear()
wd.find_element_by_id("KeyPassphrase:4").send_keys("1234567890")
self.id_wait(wd, "Btn_apply_WLANSSIDConf:4")
time.sleep(2)
self.id_wait(wd, "instName_WLANSSIDConf:5")
self.id_wait(wd, "ESSID:5")
wd.find_element_by_id("ESSID:5").clear()
wd.find_element_by_id("ESSID:5").send_keys("Test_5_2")
self.id_wait(wd, "KeyPassphrase:5")
wd.find_element_by_id("KeyPassphrase:5").clear()
wd.find_element_by_id("KeyPassphrase:5").send_keys("1234567890")
self.id_wait(wd, "MaxUserNum:5")
wd.find_element_by_id("MaxUserNum:5").clear()
wd.find_element_by_id("MaxUserNum:5").send_keys("2")
self.id_wait(wd, "topLine_WLANSSIDConf:5")
if not wd.find_element_by_id("Enable1:5").is_selected():
self.id_wait(wd, "Enable1:5")
self.id_wait(wd, "Btn_apply_WLANSSIDConf:5")
time.sleep(2)
self.id_wait(wd, "instName_WLANSSIDConf:6")
self.id_wait(wd, "ESSID:6")
wd.find_element_by_id("ESSID:6").clear()
wd.find_element_by_id("ESSID:6").send_keys("Test_5_3")
self.id_wait(wd, "KeyPassphrase:6")
wd.find_element_by_id("KeyPassphrase:6").clear()
wd.find_element_by_id("KeyPassphrase:6").send_keys("1234567890")
if not wd.find_element_by_id("VapIsolationEnable1:6").is_selected():
self.id_wait(wd, "VapIsolationEnable1:6")
self.id_wait(wd, "Btn_apply_WLANSSIDConf:6")
time.sleep(2)
self.id_wait(wd, "MaxUserNum:6")
wd.find_element_by_id("MaxUserNum:6").clear()
wd.find_element_by_id("MaxUserNum:6").send_keys("Test_5_4")
if not wd.find_element_by_id("Enable1:6").is_selected():
self.id_wait(wd, "Enable1:6")
self.id_wait(wd, "MaxUserNum:6")
wd.find_element_by_id("MaxUserNum:6").clear()
wd.find_element_by_id("MaxUserNum:6").send_keys("21")
self.id_wait(wd, "Btn_apply_WLANSSIDConf:6")
time.sleep(2)
self.id_wait(wd, "instName_WLANSSIDConf:7")
self.id_wait(wd, "ESSID:7")
wd.find_element_by_id("ESSID:7").clear()
wd.find_element_by_id("ESSID:7").send_keys("Test_5_4")
self.id_wait(wd, "KeyPassphrase:7")
wd.find_element_by_id("KeyPassphrase:7").clear()
wd.find_element_by_id("KeyPassphrase:7").send_keys("1234567890")
self.id_wait(wd, "Btn_apply_WLANSSIDConf:7")
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
#if __name__ == '__main__':
# unittest.main()
| apache-2.0 | 8,971,691,521,340,972,000 | 46.398876 | 99 | 0.603651 | false |
Kivvix/stage-LPC | compareSrc/data/sqlcl.py | 1 | 3255 | #!/usr/bin/python2
""">> sqlcl << command line query tool by Tamas Budavari <[email protected]>
Usage: sqlcl [options] sqlfile(s)
Options:
-s url : URL with the ASP interface (default: pha)
-f fmt : set output format (html,xml,csv - default: csv)
-q query : specify query on the command line
-l : skip first line of output with column names
-v : verbose mode dumps settings in header
-h : show this message"""
formats = ['csv','xml','html']
default_url='http://cas.sdss.org/stripe82/en/tools/search/x_sql.asp'
default_fmt='csv'
def usage(status, msg=''):
"Error message and usage"
print __doc__
if msg:
print '-- ERROR: %s' % msg
sys.exit(status)
def filtercomment(sql):
"Get rid of comments starting with --"
import os
fsql = ''
for line in sql.split('\n'):
fsql += line.split('--')[0] + ' ' + os.linesep;
return fsql
"""
def query(sql,url=default_url,fmt=default_fmt):
"Run query and return file object"
import urllib
fsql = filtercomment(sql)
params = urllib.urlencode({'cmd': fsql, 'format': fmt})
return urllib.urlopen(url+'?%s' % params)
"""
def query(sql,url=default_url,fmt=default_fmt):
"Run query and return file object"
import urllib
import time
import sys
fsql = filtercomment(sql)
params = urllib.urlencode({'cmd': fsql, 'format': fmt})
try:
r = urllib.urlopen(url+'?%s' % params)
except IOError:
i = 0
while i < 10 :
time.sleep(2)
print "try "+str(i)
try:
r = urllib.urlopen(url+'?%s' % params)
i = 2001
print str(i)
break
except:
i += 1
if i != 2001 :
sys.exit("Houston, we've had a problem...\n\tConnexion impossible, veuillez relancer le script plus tard.")
return r
def write_header(ofp,pre,url,qry):
import time
ofp.write('%s SOURCE: %s\n' % (pre,url))
ofp.write('%s TIME: %s\n' % (pre,time.asctime()))
ofp.write('%s QUERY:\n' % pre)
for l in qry.split('\n'):
ofp.write('%s %s\n' % (pre,l))
def main(argv):
"Parse command line and do it..."
import os, getopt, string
queries = []
url = os.getenv("SQLCLURL",default_url)
fmt = default_fmt
writefirst = 1
verbose = 0
# Parse command line
try:
optlist, args = getopt.getopt(argv[1:],'s:f:q:vlh?')
except getopt.error, e:
usage(1,e)
for o,a in optlist:
if o=='-s': url = a
elif o=='-f': fmt = a
elif o=='-q': queries.append(a)
elif o=='-l': writefirst = 0
elif o=='-v': verbose += 1
else: usage(0)
if fmt not in formats:
usage(1,'Wrong format!')
# Enqueue queries in files
for fname in args:
try:
queries.append(open(fname).read())
except IOError, e:
usage(1,e)
# Run all queries sequentially
for qry in queries:
ofp = sys.stdout
if verbose:
write_header(ofp,'#',url,qry)
file = query(qry,url,fmt)
# Output line by line (in case it's big)
line = file.readline()
if line.startswith("ERROR"): # SQL Statement Error -> stderr
ofp = sys.stderr
if writefirst:
ofp.write(string.rstrip(line)+os.linesep)
line = file.readline()
while line:
ofp.write(string.rstrip(line)+os.linesep)
line = file.readline()
if __name__=='__main__':
import sys
main(sys.argv)
| mit | 2,702,793,862,215,250,400 | 23.232558 | 110 | 0.612903 | false |
meck93/hs17-datavis-ex | ex4/ex4_task4.py | 1 | 1679 | # -*- coding: utf-8 -*-
"""
Data Visualization HS 17 - Exercise 4
Moritz Eck - 14-715-296
"""
import ex4_reader as data_reader
import matplotlib.pyplot as plt
import numpy as np
# z-value constants
MIN_HEIGHT = 0.035
MAX_HEIGHT = 19.835
STEP_SIZE = 0.2
# desired height
HEIGHT = 1.0
# x-values: list containing the heights of the plot
heights = np.arange(MIN_HEIGHT, MAX_HEIGHT + STEP_SIZE, STEP_SIZE)
# load the terrain data
terr_data = data_reader.read_terrain_data()
# create the plot
fig, plot = plt.subplots()
# set title of the window
fig.canvas.set_window_title("DataVis HS17 Ex04 - Task 1")
# create the axis and layout
plot.set_title("color filled terrain visualization")
plot.set_xlabel("longitude (X-coord)")
plot.set_ylabel("latitude (Y-coord)")
plot.set_aspect(1)
# invert the y axis
plot.invert_yaxis()
# plot the terrain data layer
terr_plot = plot.contourf(terr_data, cmap='terrain')
# colorbar for the terrain_plot
plot_colorbar = plt.colorbar(terr_plot)
plot_colorbar.set_label("elevation [m (above sea level)]")
# index for z-value at altitude: 1km
index = 0
# find the correct index for the HEIGHT
for i in range(len(heights)):
if heights[i] > HEIGHT:
index = i
break
# load the temperature data: hour 1
temp_hour_1 = data_reader.read_geo_data('TC', 1)
# y-values: list of temperature data for location (200, 250)
temp_data = temp_hour_1[:, :, index]
print(temp_data)
# plotting the temperature data
temp_plot = plot.contourf(temp_data, cmap="gist_heat_r", alpha=1.0)
# colorbar for the temp_plot
plot_colorbar = plt.colorbar(temp_plot)
plot_colorbar.set_label("temperature at 1km altitude")
# show both plots
plt.show() | mit | 3,054,575,912,814,792,000 | 22.661972 | 67 | 0.714116 | false |
arrabito/DIRAC | Interfaces/API/Dirac.py | 1 | 98785 | """
DIRAC API Class
All DIRAC functionality is exposed through the DIRAC API and this
serves as a source of documentation for the project via EpyDoc.
The DIRAC API provides the following functionality:
- A transparent and secure way for users
to submit jobs to the Grid, monitor them and
retrieve outputs
- Interaction with Grid storage and file catalogues
via the DataManagement public interfaces (more to be added)
- Local execution of workflows for testing purposes.
"""
import re
import os
import sys
import time
import shutil
import tempfile
import glob
import tarfile
import urllib
import StringIO
import DIRAC
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.Core.Base.API import API
from DIRAC.Interfaces.API.JobRepository import JobRepository
from DIRAC.Core.Utilities.ClassAd.ClassAdLight import ClassAd
from DIRAC.Core.Utilities.Subprocess import shellCall
from DIRAC.Core.Utilities.ModuleFactory import ModuleFactory
from DIRAC.WorkloadManagementSystem.Client.WMSClient import WMSClient
from DIRAC.WorkloadManagementSystem.Client.SandboxStoreClient import SandboxStoreClient
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.ConfigurationSystem.Client.PathFinder import getSystemSection, getServiceURL
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite
from DIRAC.ConfigurationSystem.Client.LocalConfiguration import LocalConfiguration
from DIRAC.Core.Base.AgentReactor import AgentReactor
from DIRAC.Core.Security.X509Chain import X509Chain
from DIRAC.Core.Security import Locations
from DIRAC.Core.Utilities import Time
from DIRAC.Core.Utilities.File import mkDir
from DIRAC.Core.Utilities.PrettyPrint import printTable, printDict
__RCSID__ = "$Id$"
COMPONENT_NAME = 'DiracAPI'
def parseArguments(args):
argList = []
for arg in args:
argList += arg.split(',')
return argList
class Dirac(API):
"""
DIRAC API Class
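    Example usage (a minimal sketch; the repository file name 'jobRepo.cfg' is an arbitrary choice):
    >>> from DIRAC.Interfaces.API.Dirac import Dirac
    >>> dirac = Dirac(withRepo=True, repoLocation='jobRepo.cfg')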
"""
#############################################################################
def __init__(self, withRepo=False, repoLocation='', useCertificates=False, vo=None):
"""Internal initialization of the DIRAC API.
"""
super(Dirac, self).__init__()
self.section = '/LocalSite/'
self.jobRepo = False
if withRepo:
self.jobRepo = JobRepository(repoLocation)
if not self.jobRepo.isOK():
gLogger.error("Unable to write to supplied repository location")
self.jobRepo = False
self.useCertificates = useCertificates
# Determine the default file catalog
self.defaultFileCatalog = gConfig.getValue(self.section + '/FileCatalog', None)
self.vo = vo
def _checkFileArgument(self, fnList, prefix=None, single=False):
if prefix is None:
prefix = 'LFN'
if isinstance(fnList, basestring):
otherPrefix = 'LFN:' if prefix == 'PFN' else 'PFN:'
if otherPrefix in fnList:
        return self._errorReport('Expected %s string, not %s' % (prefix, otherPrefix))
return S_OK(fnList.replace('%s:' % prefix, ''))
elif isinstance(fnList, list):
if single:
return self._errorReport('Expected single %s string' % prefix)
try:
return S_OK([fn.replace('%s:' % prefix, '') for fn in fnList])
except Exception as x:
return self._errorReport(str(x), 'Expected strings in list of %ss' % prefix)
else:
return self._errorReport('Expected single string or list of strings for %s(s)' % prefix)
def _checkJobArgument(self, jobID, multiple=False):
try:
if isinstance(jobID, (str, int, long)):
jobID = int(jobID)
if multiple:
jobID = [jobID]
elif isinstance(jobID, (list, dict)):
if multiple:
jobID = [int(job) for job in jobID]
else:
return self._errorReport('Expected int or string, not list')
return S_OK(jobID)
except Exception as x:
      return self._errorReport(str(x), 'Expected %sinteger or string for existing jobID' %
                               ('(list of) ' if multiple else ''))
#############################################################################
# Repository specific methods
#############################################################################
def getRepositoryJobs(self, printOutput=False):
""" Retireve all the jobs in the repository
Example Usage:
>>> print dirac.getRepositoryJobs()
{'OK': True, 'Value': [1,2,3,4]}
       :returns: S_OK,S_ERROR
"""
if not self.jobRepo:
gLogger.warn("No repository is initialised")
return S_OK()
jobIDs = self.jobRepo.readRepository()['Value'].keys()
if printOutput:
print self.pPrint.pformat(jobIDs)
return S_OK(jobIDs)
def monitorRepository(self, printOutput=False):
"""Monitor the jobs present in the repository
Example Usage:
>>> print dirac.monitorRepository()
{'OK': True, 'Value': ''}
:returns: S_OK,S_ERROR
"""
if not self.jobRepo:
gLogger.warn("No repository is initialised")
return S_OK()
jobs = self.jobRepo.readRepository()['Value']
jobIDs = jobs.keys()
res = self.status(jobIDs)
if not res['OK']:
return self._errorReport(res['Message'], 'Failed to get status of jobs from WMS')
statusDict = {}
for jobDict in jobs.values():
state = jobDict.get('State', 'Unknown')
statusDict[state] = statusDict.setdefault(state, 0) + 1
if printOutput:
print self.pPrint.pformat(statusDict)
return S_OK(statusDict)
def retrieveRepositorySandboxes(self, requestedStates=None, destinationDirectory=''):
""" Obtain the output sandbox for the jobs in requested states in the repository
Example Usage:
>>> print dirac.retrieveRepositorySandboxes(requestedStates=['Done','Failed'],destinationDirectory='sandboxes')
{'OK': True, 'Value': ''}
:param requestedStates: List of jobs states to be considered
:type requestedStates: list of strings
:param destinationDirectory: The target directory to place sandboxes (each jobID will have a directory created beneath this)
:type destinationDirectory: string
:returns: S_OK,S_ERROR
"""
if not self.jobRepo:
gLogger.warn("No repository is initialised")
return S_OK()
if requestedStates is None:
      requestedStates = ['Done', 'Failed', 'Completed']  # because users don't care about completed
jobs = self.jobRepo.readRepository()['Value']
for jobID in sorted(jobs):
jobDict = jobs[jobID]
if jobDict.get('State') in requestedStates:
# # Value of 'Retrieved' is a string, e.g. '0' when read from file
if not int(jobDict.get('Retrieved')):
self.getOutputSandbox(jobID, destinationDirectory)
return S_OK()
def retrieveRepositoryData(self, requestedStates=None, destinationDirectory=''):
""" Obtain the output data for the jobs in requested states in the repository
Example Usage:
>>> print dirac.retrieveRepositoryData(requestedStates=['Done'],destinationDirectory='outputData')
{'OK': True, 'Value': ''}
:param requestedStates: List of jobs states to be considered
:type requestedStates: list of strings
:param destinationDirectory: The target directory to place sandboxes (a directory is created for each JobID)
:type destinationDirectory: string
:returns: S_OK,S_ERROR
"""
if not self.jobRepo:
gLogger.warn("No repository is initialised")
return S_OK()
if requestedStates is None:
requestedStates = ['Done']
jobs = self.jobRepo.readRepository()['Value']
for jobID in sorted(jobs):
jobDict = jobs[jobID]
if jobDict.get('State') in requestedStates:
# # Value of 'OutputData' is a string, e.g. '0' when read from file
if not int(jobDict.get('OutputData')):
destDir = jobID
if destinationDirectory:
destDir = "%s/%s" % (destinationDirectory, jobID)
self.getJobOutputData(jobID, destinationDir=destDir)
return S_OK()
def removeRepository(self):
""" Removes the job repository and all sandboxes and output data retrieved
Example Usage:
>>> print dirac.removeRepository()
{'OK': True, 'Value': ''}
:returns: S_OK,S_ERROR
"""
if not self.jobRepo:
gLogger.warn("No repository is initialised")
return S_OK()
jobs = self.jobRepo.readRepository()['Value']
for jobID in sorted(jobs):
jobDict = jobs[jobID]
if os.path.exists(jobDict.get('Sandbox', '')):
shutil.rmtree(jobDict['Sandbox'], ignore_errors=True)
if 'OutputFiles' in jobDict:
for fileName in eval(jobDict['OutputFiles']):
if os.path.exists(fileName):
os.remove(fileName)
self.delete(sorted(jobs))
os.remove(self.jobRepo.getLocation()['Value'])
self.jobRepo = False
return S_OK()
def resetRepository(self, jobIDs=None):
""" Reset all the status of the (optionally supplied) jobs in the repository
Example Usage:
>>> print dirac.resetRepository(jobIDs = [1111,2222,'3333'])
{'OK': True, 'Value': ''}
:returns: S_OK,S_ERROR
"""
if not self.jobRepo:
gLogger.warn("No repository is initialised")
return S_OK()
if jobIDs is None:
jobIDs = []
if not isinstance(jobIDs, list):
return self._errorReport('The jobIDs must be a list of (strings or ints).')
self.jobRepo.resetRepository(jobIDs=jobIDs)
return S_OK()
#############################################################################
def submit(self, job, mode='wms'):
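    """Alias of submitJob(); see that method for the full documentation."""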
return self.submitJob(job, mode=mode)
def submitJob(self, job, mode='wms'):
"""Submit jobs to DIRAC (by default to the Worload Management System).
These can be either:
- Instances of the Job Class
- VO Application Jobs
- Inline scripts
- Scripts as executables
- Scripts inside an application environment
- JDL File
- JDL String
Example usage:
>>> print dirac.submitJob(job)
{'OK': True, 'Value': '12345'}
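       A fuller sketch (illustrative only; 'myScript.sh' stands for any executable shipped in the input sandbox):
       >>> from DIRAC.Interfaces.API.Job import Job
       >>> j = Job()
       >>> j.setExecutable('myScript.sh')
       >>> print Dirac().submitJob(j)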
:param job: Instance of Job class or JDL string
:type job: ~DIRAC.Interfaces.API.Job.Job or str
:param mode: Submit job to WMS with mode = 'wms' (default),
'local' to run the workflow locally,
and 'agent' to run full Job Wrapper locally
:type mode: str
:returns: S_OK,S_ERROR
"""
self.__printInfo()
if isinstance(job, basestring):
if os.path.exists(job):
self.log.verbose('Found job JDL file %s' % (job))
with open(job, 'r') as fd:
jdlAsString = fd.read()
else:
self.log.verbose('Job is a JDL string')
jdlAsString = job
jobDescriptionObject = None
else: # we assume it is of type "DIRAC.Interfaces.API.Job.Job"
try:
formulationErrors = job.errorDict
except AttributeError as x:
self.log.verbose('Could not obtain job errors:%s' % (x))
formulationErrors = {}
if formulationErrors:
for method, errorList in formulationErrors.iteritems():
self.log.error('>>>> Error in %s() <<<<\n%s' % (method, '\n'.join(errorList)))
return S_ERROR(formulationErrors)
      # Run any VO-specific checks if desired prior to submission; this may or may not be overridden
      # in a derived class, for example
try:
result = self.preSubmissionChecks(job, mode)
if not result['OK']:
self.log.error('Pre-submission checks failed for job with message: "%s"' % (result['Message']))
return result
except BaseException as x:
msg = 'Error in VO specific function preSubmissionChecks: "%s"' % (x)
self.log.error(msg)
return S_ERROR(msg)
jobDescriptionObject = StringIO.StringIO(job._toXML()) # pylint: disable=protected-access
jdlAsString = job._toJDL(jobDescriptionObject=jobDescriptionObject) # pylint: disable=protected-access
if mode.lower() == 'local':
result = self.runLocal(job)
elif mode.lower() == 'agent':
self.log.info('Executing workflow locally with full WMS submission and DIRAC Job Agent')
result = self.runLocalAgent(jdlAsString, jobDescriptionObject)
elif mode.lower() == 'wms':
self.log.verbose('Will submit job to WMS') # this will happen by default anyway
result = WMSClient().submitJob(jdlAsString, jobDescriptionObject)
if not result['OK']:
self.log.error('Job submission failure', result['Message'])
elif self.jobRepo:
jobIDList = result['Value']
if not isinstance(jobIDList, list):
jobIDList = [jobIDList]
for jobID in jobIDList:
result = self.jobRepo.addJob(jobID, 'Submitted')
return result
#############################################################################
def __cleanTmp(self, cleanPath):
"""Remove tmp file or directory
"""
if not cleanPath:
return
if os.path.isfile(cleanPath):
os.unlink(cleanPath)
return
if os.path.isdir(cleanPath):
shutil.rmtree(cleanPath, ignore_errors=True)
return
self.__printOutput(sys.stdout, 'Could not remove %s' % str(cleanPath))
return
#############################################################################
def preSubmissionChecks(self, job, mode):
"""Internal function. The pre-submission checks method allows VOs to
make their own checks before job submission. To make use of this the
method should be overridden in a derived VO-specific Dirac class.
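       A hedged sketch of a possible VO-specific override (DiracMyVO and the check shown are
       illustrative only, not part of DIRAC):
       >>> class DiracMyVO(Dirac):
       ...   def preSubmissionChecks(self, job, mode):
       ...     if mode.lower() == 'local':
       ...       return S_ERROR('Local running is not allowed for this VO')
       ...     return S_OK('Nothing to do')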
"""
return S_OK('Nothing to do')
#############################################################################
def runLocalAgent(self, jdl, jobDescriptionObject):
"""Internal function. This method is equivalent to submitJob(job,mode='Agent').
All output files are written to a <jobID> directory where <jobID> is the
       result of submission to the WMS. Please note that the job must be eligible to run at the
       site it is submitted from.
"""
jdl = self.__forceLocal(jdl)
jobID = WMSClient().submitJob(jdl, jobDescriptionObject)
if not jobID['OK']:
self.log.error('Job submission failure', jobID['Message'])
return S_ERROR('Could not submit job to WMS')
jobID = int(jobID['Value'])
self.log.info('The job has been submitted to the WMS with jobID = %s, monitoring starts.' % jobID)
result = self.__monitorSubmittedJob(jobID)
if not result['OK']:
self.log.info(result['Message'])
return result
self.log.info('Job %s is now eligible to be picked up from the WMS by a local job agent' % jobID)
    # now run job agent targeted to pick up this job
result = self.__runJobAgent(jobID)
return result
@staticmethod
def __forceLocal(job):
"""Update Job description to avoid pilot submission by WMS
"""
if os.path.exists(job):
with open(job, 'r') as jdlFile:
jdl = jdlFile.read()
else:
jdl = job
if '[' not in jdl:
jdl = '[' + jdl + ']'
classAdJob = ClassAd(jdl)
classAdJob.insertAttributeString('Site', DIRAC.siteName())
classAdJob.insertAttributeString('SubmitPools', 'Local')
classAdJob.insertAttributeString('PilotTypes', 'private')
return classAdJob.asJDL()
#############################################################################
def __runJobAgent(self, jobID):
""" This internal method runs a tailored job agent for the local execution
        of a previously submitted WMS job. The type of CEUniqueID can be overridden
via the configuration.
Currently must unset CMTPROJECTPATH to get this to work.
"""
agentName = 'WorkloadManagement/JobAgent'
    self.log.verbose('In case this was invoked from a DIRAC script,'
                     ' now resetting sys arguments to null from: \n%s' % (sys.argv))
sys.argv = []
localCfg = LocalConfiguration()
ceType = gConfig.getValue('/LocalSite/LocalCE', 'InProcess')
localCfg.addDefaultEntry('CEUniqueID', ceType)
localCfg.addDefaultEntry('ControlDirectory', os.getcwd())
localCfg.addDefaultEntry('MaxCycles', 1)
localCfg.addDefaultEntry('/LocalSite/WorkingDirectory', os.getcwd())
localCfg.addDefaultEntry('/LocalSite/MaxCPUTime', 300000)
localCfg.addDefaultEntry('/LocalSite/CPUTime', 300000)
localCfg.addDefaultEntry('/LocalSite/OwnerGroup', self.__getCurrentGroup())
    # Running twice in the same process, the second time it uses the initial JobID.
(fd, jobidCfg) = tempfile.mkstemp('.cfg', 'DIRAC_JobId', text=True)
os.write(fd, 'AgentJobRequirements\n {\n JobID = %s\n }\n' % jobID)
os.close(fd)
gConfig.loadFile(jobidCfg)
self.__cleanTmp(jobidCfg)
localCfg.addDefaultEntry('/AgentJobRequirements/PilotType', 'private')
ownerDN = self.__getCurrentDN()
ownerGroup = self.__getCurrentGroup()
# localCfg.addDefaultEntry('OwnerDN',ownerDN)
# localCfg.addDefaultEntry('OwnerGroup',ownerGroup)
# localCfg.addDefaultEntry('JobID',jobID)
localCfg.addDefaultEntry('/AgentJobRequirements/OwnerDN', ownerDN)
localCfg.addDefaultEntry('/AgentJobRequirements/OwnerGroup', ownerGroup)
localCfg.addDefaultEntry('/Resources/Computing/%s/PilotType' % ceType, 'private')
localCfg.addDefaultEntry('/Resources/Computing/%s/OwnerDN' % ceType, ownerDN)
localCfg.addDefaultEntry('/Resources/Computing/%s/OwnerGroup' % ceType, ownerGroup)
# localCfg.addDefaultEntry('/Resources/Computing/%s/JobID' %ceType,jobID)
# SKP can add compatible platforms here
localCfg.setConfigurationForAgent(agentName)
result = localCfg.loadUserData()
if not result['OK']:
self.log.error('There were errors when loading configuration', result['Message'])
return S_ERROR('Could not start DIRAC Job Agent')
agent = AgentReactor(agentName)
result = agent.runNumCycles(agentName, numCycles=1)
if not result['OK']:
self.log.error('Job Agent execution completed with errors', result['Message'])
return result
#############################################################################
def __getCurrentGroup(self):
"""Simple function to return current DIRAC group.
"""
proxy = Locations.getProxyLocation()
if not proxy:
return S_ERROR('No proxy found in local environment')
else:
self.log.verbose('Current proxy is %s' % proxy)
chain = X509Chain()
result = chain.loadProxyFromFile(proxy)
if not result['OK']:
return result
result = chain.getDIRACGroup()
if not result['OK']:
return result
group = result['Value']
self.log.verbose('Current group is %s' % group)
return group
#############################################################################
def __getCurrentDN(self):
"""Simple function to return current DN.
"""
proxy = Locations.getProxyLocation()
if not proxy:
return S_ERROR('No proxy found in local environment')
else:
self.log.verbose('Current proxy is %s' % proxy)
chain = X509Chain()
result = chain.loadProxyFromFile(proxy)
if not result['OK']:
return result
result = chain.getIssuerCert()
if not result['OK']:
return result
issuerCert = result['Value']
dn = issuerCert.getSubjectDN()['Value']
return dn
#############################################################################
def _runLocalJobAgent(self, jobID):
"""Developer function. In case something goes wrong with 'agent' submission, after
       successful WMS submission, this takes the jobID and allows the job agent run
       to be retried.
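       Example usage (a minimal sketch; 12345 stands for a jobID previously returned by the WMS):
       >>> Dirac()._runLocalJobAgent(12345)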
"""
result = self.__monitorSubmittedJob(jobID)
if not result['OK']:
self.log.info(result['Message'])
return result
self.log.info('Job %s is now eligible to be picked up from the WMS by a local job agent' % jobID)
    # now run job agent targeted to pick up this job
result = self.__runJobAgent(jobID)
return result
#############################################################################
def __monitorSubmittedJob(self, jobID):
"""Internal function. Monitors a submitted job until it is eligible to be
retrieved or enters a failed state.
"""
pollingTime = 10 # seconds
maxWaitingTime = 600 # seconds
start = time.time()
finalState = False
while not finalState:
jobStatus = self.status(jobID)
self.log.verbose(jobStatus)
if not jobStatus['OK']:
self.log.error('Could not monitor job status, will retry in %s seconds' % pollingTime, jobStatus['Message'])
else:
jobStatus = jobStatus['Value'][jobID]['Status']
if jobStatus.lower() == 'waiting':
finalState = True
return S_OK('Job is eligible to be picked up')
if jobStatus.lower() == 'failed':
finalState = True
return S_ERROR('Problem with job %s definition, WMS status is Failed' % jobID)
      self.log.info('Current status for job %s is %s, will retry in %s seconds' % (jobID, jobStatus, pollingTime))
current = time.time()
if current - start > maxWaitingTime:
finalState = True
return S_ERROR('Exceeded max waiting time of %s seconds for job %s to enter Waiting state,'
' exiting.' % (maxWaitingTime, jobID))
time.sleep(pollingTime)
#############################################################################
@staticmethod
def __getVOPolicyModule(module):
""" Utility to get the VO Policy module name
"""
moduleName = ''
setup = gConfig.getValue('/DIRAC/Setup', '')
vo = None
ret = getProxyInfo(disableVOMS=True)
if ret['OK'] and 'group' in ret['Value']:
vo = getVOForGroup(ret['Value']['group'])
if setup and vo:
moduleName = gConfig.getValue('DIRAC/VOPolicy/%s/%s/%s' % (vo, setup, module), '')
if not moduleName:
moduleName = gConfig.getValue('DIRAC/VOPolicy/%s' % module, '')
return moduleName
#############################################################################
def getInputDataCatalog(self, lfns, siteName='', fileName='pool_xml_catalog.xml', ignoreMissing=False):
"""This utility will create a pool xml catalogue slice for the specified LFNs using
the full input data resolution policy plugins for the VO.
If not specified the site is assumed to be the DIRAC.siteName() from the local
configuration. The fileName can be a full path.
Example usage:
       >>> print d.getInputDataCatalog('/lhcb/production/DC06/phys-v2-lumi5/00001680/DST/0000/00001680_00000490_5.dst',None,'myCat.xml')
{'Successful': {'<LFN>': {'pfntype': 'ROOT_All', 'protocol': 'SRM2',
'pfn': '<PFN>', 'turl': '<TURL>', 'guid': '3E3E097D-0AC0-DB11-9C0A-00188B770645',
'se': 'CERN-disk'}}, 'Failed': [], 'OK': True, 'Value': ''}
:param lfns: Logical File Name(s) to query
:type lfns: LFN str or python:list []
:param siteName: DIRAC site name
:type siteName: string
:param fileName: Catalogue name (can include path)
:type fileName: string
:returns: S_OK,S_ERROR
"""
ret = self._checkFileArgument(lfns, 'LFN')
if not ret['OK']:
return ret
lfns = ret['Value']
if not siteName:
siteName = DIRAC.siteName()
if ignoreMissing:
self.log.verbose('Ignore missing flag is enabled')
localSEList = getSEsForSite(siteName)
if not localSEList['OK']:
return localSEList
self.log.verbose(localSEList)
inputDataPolicy = self.__getVOPolicyModule('InputDataModule')
if not inputDataPolicy:
return self._errorReport('Could not retrieve DIRAC/VOPolicy/InputDataModule for VO')
self.log.info('Attempting to resolve data for %s' % siteName)
self.log.verbose('%s' % ('\n'.join(lfns)))
replicaDict = self.getReplicasForJobs(lfns)
if not replicaDict['OK']:
return replicaDict
catalogFailed = replicaDict['Value'].get('Failed', {})
guidDict = self.getMetadata(lfns)
if not guidDict['OK']:
return guidDict
for lfn, reps in replicaDict['Value']['Successful'].iteritems():
guidDict['Value']['Successful'][lfn].update(reps)
resolvedData = guidDict
diskSE = gConfig.getValue(self.section + '/DiskSE', ['-disk', '-DST', '-USER', '-FREEZER'])
tapeSE = gConfig.getValue(self.section + '/TapeSE', ['-tape', '-RDST', '-RAW'])
# Add catalog path / name here as well as site name to override the standard policy of resolving automatically
configDict = {'JobID': None,
'LocalSEList': localSEList['Value'],
'DiskSEList': diskSE,
'TapeSEList': tapeSE,
'SiteName': siteName,
'CatalogName': fileName}
self.log.verbose(configDict)
argumentsDict = {'FileCatalog': resolvedData, 'Configuration': configDict, 'InputData': lfns}
if ignoreMissing:
argumentsDict['IgnoreMissing'] = True
self.log.verbose(argumentsDict)
moduleFactory = ModuleFactory()
self.log.verbose('Input Data Policy Module: %s' % inputDataPolicy)
moduleInstance = moduleFactory.getModule(inputDataPolicy, argumentsDict)
if not moduleInstance['OK']:
self.log.warn('Could not create InputDataModule')
return moduleInstance
module = moduleInstance['Value']
result = module.execute()
self.log.debug(result)
if not result['OK']:
if 'Failed' in result:
self.log.error('Input data resolution failed for the following files:\n', '\n'.join(result['Failed']))
if catalogFailed:
self.log.error('Replicas not found for the following files:')
for key, value in catalogFailed.iteritems():
self.log.error('%s %s' % (key, value))
if 'Failed' in result:
result['Failed'] = catalogFailed.keys()
return result
#############################################################################
def runLocal(self, job):
""" Internal function. This method is called by DIRAC API function submitJob(job,mode='Local').
All output files are written to the local directory.
:param job: a job object
:type job: ~DIRAC.Interfaces.API.Job.Job
"""
self.log.notice('Executing workflow locally')
curDir = os.getcwd()
self.log.info('Executing from %s' % curDir)
jobDir = tempfile.mkdtemp(suffix='_JobDir', prefix='Local_', dir=curDir)
os.chdir(jobDir)
self.log.info('Executing job at temp directory %s' % jobDir)
tmpdir = tempfile.mkdtemp(prefix='DIRAC_')
self.log.verbose('Created temporary directory for submission %s' % (tmpdir))
jobXMLFile = tmpdir + '/jobDescription.xml'
self.log.verbose('Job XML file description is: %s' % jobXMLFile)
with open(jobXMLFile, 'w+') as fd:
fd.write(job._toXML()) # pylint: disable=protected-access
shutil.copy(jobXMLFile, '%s/%s' % (os.getcwd(), os.path.basename(jobXMLFile)))
res = self.__getJDLParameters(job)
if not res['OK']:
self.log.error("Could not extract job parameters from job")
return res
parameters = res['Value']
self.log.verbose("Job parameters: %s" % printDict(parameters))
inputDataRes = self._getLocalInputData(parameters)
if not inputDataRes['OK']:
return inputDataRes
inputData = inputDataRes['Value']
if inputData:
self.log.verbose("Job has input data: %s" % inputData)
localSEList = gConfig.getValue('/LocalSite/LocalSE', '')
if not localSEList:
return self._errorReport('LocalSite/LocalSE should be defined in your config file')
localSEList = localSEList.replace(' ', '').split(',')
self.log.debug("List of local SEs: %s" % localSEList)
inputDataPolicy = self.__getVOPolicyModule('InputDataModule')
if not inputDataPolicy:
return self._errorReport('Could not retrieve DIRAC/VOPolicy/InputDataModule for VO')
self.log.info('Job has input data requirement, will attempt to resolve data for %s' % DIRAC.siteName())
self.log.verbose('\n'.join(inputData if isinstance(inputData, (list, tuple)) else [inputData]))
replicaDict = self.getReplicasForJobs(inputData)
if not replicaDict['OK']:
return replicaDict
guidDict = self.getMetadata(inputData)
if not guidDict['OK']:
return guidDict
for lfn, reps in replicaDict['Value']['Successful'].iteritems():
guidDict['Value']['Successful'][lfn].update(reps)
resolvedData = guidDict
diskSE = gConfig.getValue(self.section + '/DiskSE', ['-disk', '-DST', '-USER', '-FREEZER'])
tapeSE = gConfig.getValue(self.section + '/TapeSE', ['-tape', '-RDST', '-RAW'])
configDict = {'JobID': None,
'LocalSEList': localSEList,
'DiskSEList': diskSE,
'TapeSEList': tapeSE}
self.log.verbose(configDict)
argumentsDict = {'FileCatalog': resolvedData,
'Configuration': configDict,
'InputData': inputData,
'Job': parameters}
self.log.verbose(argumentsDict)
moduleFactory = ModuleFactory()
moduleInstance = moduleFactory.getModule(inputDataPolicy, argumentsDict)
if not moduleInstance['OK']:
self.log.warn('Could not create InputDataModule')
return moduleInstance
module = moduleInstance['Value']
result = module.execute()
if not result['OK']:
self.log.warn('Input data resolution failed')
return result
softwarePolicy = self.__getVOPolicyModule('SoftwareDistModule')
if softwarePolicy:
moduleFactory = ModuleFactory()
moduleInstance = moduleFactory.getModule(softwarePolicy, {'Job': parameters})
if not moduleInstance['OK']:
self.log.warn('Could not create SoftwareDistModule')
return moduleInstance
module = moduleInstance['Value']
result = module.execute()
if not result['OK']:
self.log.warn('Software installation failed with result:\n%s' % (result))
return result
else:
self.log.verbose('Could not retrieve DIRAC/VOPolicy/SoftwareDistModule for VO')
sandbox = parameters.get('InputSandbox')
if sandbox:
self.log.verbose("Input Sandbox is %s" % sandbox)
if isinstance(sandbox, basestring):
sandbox = [isFile.strip() for isFile in sandbox.split(',')]
for isFile in sandbox:
self.log.debug("Resolving Input Sandbox %s" % isFile)
if isFile.lower().startswith("lfn:"): # isFile is an LFN
isFile = isFile[4:]
# Attempt to copy into job working directory, unless it is already there
if os.path.exists(os.path.join(os.getcwd(), os.path.basename(isFile))):
self.log.debug("Input Sandbox %s found in the job directory, no need to copy it" % isFile)
else:
if os.path.isabs(isFile) and os.path.exists(isFile):
self.log.debug("Input Sandbox %s is a file with absolute path, copying it" % isFile)
shutil.copy(isFile, os.getcwd())
elif os.path.isdir(isFile):
self.log.debug("Input Sandbox %s is a directory, found in the user working directory, copying it" % isFile)
shutil.copytree(isFile, os.path.basename(isFile), symlinks=True)
elif os.path.exists(os.path.join(curDir, os.path.basename(isFile))):
self.log.debug("Input Sandbox %s found in the submission directory, copying it" % isFile)
shutil.copy(os.path.join(curDir, os.path.basename(isFile)), os.getcwd())
elif os.path.exists(os.path.join(tmpdir, isFile)): # if it is in the tmp dir
self.log.debug("Input Sandbox %s is a file, found in the tmp directory, copying it" % isFile)
shutil.copy(os.path.join(tmpdir, isFile), os.getcwd())
else:
self.log.verbose("perhaps the file %s is in an LFN, so we attempt to download it." % isFile)
getFile = self.getFile(isFile)
if not getFile['OK']:
self.log.warn('Failed to download %s with error: %s' % (isFile, getFile['Message']))
return S_ERROR('Can not copy InputSandbox file %s' % isFile)
isFileInCWD = os.getcwd() + os.path.sep + isFile
basefname = os.path.basename(isFileInCWD)
if tarfile.is_tarfile(basefname):
try:
with tarfile.open(basefname, 'r') as tf:
for member in tf.getmembers():
tf.extract(member, os.getcwd())
except (tarfile.ReadError, tarfile.CompressionError, tarfile.ExtractError) as x:
return S_ERROR('Could not untar or extract %s with exception %s' % (basefname, repr(x)))
self.log.info('Attempting to submit job to local site: %s' % DIRAC.siteName())
if 'Executable' in parameters:
executable = os.path.expandvars(parameters['Executable'])
else:
return self._errorReport('Missing job "Executable"')
arguments = parameters.get('Arguments', '')
# Replace argument placeholders for parametric jobs
# if we have Parameters then we have a parametric job
if 'Parameters' in parameters:
      # iterate over a copy of the items, since new keys are added to the dict inside the loop
      for par, value in parameters.items():
        if par.startswith('Parameters.'):
          # we just use the first entry in all lists to run one job
          parameters[par[len('Parameters.'):]] = value[0]
arguments = arguments % parameters
command = '%s %s' % (executable, arguments)
# If not set differently in the CS use the root from the current DIRAC installation
siteRoot = gConfig.getValue('/LocalSite/Root', DIRAC.rootPath)
os.environ['DIRACROOT'] = siteRoot
self.log.verbose('DIRACROOT = %s' % (siteRoot))
os.environ['DIRACPYTHON'] = sys.executable
self.log.verbose('DIRACPYTHON = %s' % (sys.executable))
self.log.info('Executing: %s' % command)
executionEnv = dict(os.environ)
variableList = parameters.get('ExecutionEnvironment')
if variableList:
self.log.verbose('Adding variables to execution environment')
if isinstance(variableList, basestring):
variableList = [variableList]
for var in variableList:
nameEnv = var.split('=')[0]
        valEnv = urllib.unquote(var.split('=')[1])  # unquote so the value can contain special characters
executionEnv[nameEnv] = valEnv
self.log.verbose('%s = %s' % (nameEnv, valEnv))
cbFunction = self.__printOutput
result = shellCall(0, command, env=executionEnv, callbackFunction=cbFunction)
if not result['OK']:
return result
status = result['Value'][0]
self.log.verbose('Status after execution is %s' % (status))
    # FIXME: if there is a callbackFunction, StdOutput and StdError will be empty soon
outputFileName = parameters.get('StdOutput')
errorFileName = parameters.get('StdError')
if outputFileName:
stdout = result['Value'][1]
if os.path.exists(outputFileName):
os.remove(outputFileName)
self.log.info('Standard output written to %s' % (outputFileName))
with open(outputFileName, 'w') as outputFile:
print >> outputFile, stdout
else:
self.log.warn('Job JDL has no StdOutput file parameter defined')
if errorFileName:
stderr = result['Value'][2]
if os.path.exists(errorFileName):
os.remove(errorFileName)
self.log.verbose('Standard error written to %s' % (errorFileName))
with open(errorFileName, 'w') as errorFile:
print >> errorFile, stderr
sandbox = None
else:
self.log.warn('Job JDL has no StdError file parameter defined')
sandbox = parameters.get('OutputSandbox')
if sandbox:
if isinstance(sandbox, basestring):
sandbox = [osFile.strip() for osFile in sandbox.split(',')]
for i in sandbox:
globList = glob.glob(i)
for osFile in globList:
if os.path.isabs(osFile):
            # for an absolute path keep only the basename; relative paths are resolved in the job working directory
osFile = os.path.basename(osFile)
# Attempt to copy back from job working directory
if os.path.isdir(osFile):
shutil.copytree(osFile, curDir, symlinks=True)
elif os.path.exists(osFile):
shutil.copy(osFile, curDir)
else:
return S_ERROR('Can not copy OutputSandbox file %s' % osFile)
os.chdir(curDir)
if status: # if it fails, copy content of execution dir in current directory
destDir = os.path.join(curDir, os.path.basename(os.path.dirname(tmpdir)))
self.log.debug("Copying outputs from %s to %s" % (tmpdir, destDir))
if os.path.exists(destDir):
shutil.rmtree(destDir)
shutil.copytree(tmpdir, destDir)
self.log.verbose('Cleaning up %s...' % tmpdir)
self.__cleanTmp(tmpdir)
if status:
return S_ERROR('Execution completed with non-zero status %s' % (status))
return S_OK('Execution completed successfully')
@staticmethod
def _getLocalInputData(parameters):
""" Resolve input data for locally run jobs.
        Kept as a separate method for reasons of extensibility.
"""
inputData = parameters.get('InputData')
if inputData:
if isinstance(inputData, basestring):
inputData = [inputData]
return S_OK(inputData)
#############################################################################
@staticmethod
def __printOutput(fd=None, message=''):
"""Internal callback function to return standard output when running locally.
"""
if fd:
if isinstance(fd, (int, long)):
if fd == 0:
print >> sys.stdout, message
elif fd == 1:
print >> sys.stderr, message
else:
print message
elif isinstance(fd, file):
print >> fd, message
else:
print message
#############################################################################
# def listCatalog( self, directory, printOutput = False ):
# """ Under development.
# Obtain listing of the specified directory.
# """
# rm = ReplicaManager()
# listing = rm.listCatalogDirectory( directory )
# if re.search( '\/$', directory ):
# directory = directory[:-1]
#
# if printOutput:
# for fileKey, metaDict in listing['Value']['Successful'][directory]['Files'].iteritems():
# print '#' * len( fileKey )
# print fileKey
# print '#' * len( fileKey )
# print self.pPrint.pformat( metaDict )
#############################################################################
def getReplicas(self, lfns, active=True, preferDisk=False, diskOnly=False, printOutput=False):
"""Obtain replica information from file catalogue client. Input LFN(s) can be string or list.
Example usage:
>>> print dirac.getReplicas('/lhcb/data/CCRC08/RDST/00000106/0000/00000106_00006321_1.rdst')
{'OK': True, 'Value': {'Successful': {'/lhcb/data/CCRC08/RDST/00000106/0000/00000106_00006321_1.rdst':
{'CERN-RDST':
'srm://srm-lhcb.cern.ch/castor/cern.ch/grid/lhcb/data/CCRC08/RDST/00000106/0000/00000106_00006321_1.rdst'}},
'Failed': {}}}
:param lfns: Logical File Name(s) to query
:type lfns: LFN str or python:list []
:param active: restrict to only replicas at SEs that are not banned
:type active: boolean
:param preferDisk: give preference to disk replicas if True
:type preferDisk: boolean
:param diskOnly: restrict to only disk replicas if True
:type diskOnly: boolean
:param printOutput: Optional flag to print result
:type printOutput: boolean
:returns: S_OK,S_ERROR
"""
ret = self._checkFileArgument(lfns, 'LFN')
if not ret['OK']:
return ret
lfns = ret['Value']
start = time.time()
dm = DataManager()
repsResult = dm.getReplicas(lfns, active=active, preferDisk=preferDisk, diskOnly=diskOnly)
timing = time.time() - start
self.log.info('Replica Lookup Time: %.2f seconds ' % (timing))
self.log.debug(repsResult)
if not repsResult['OK']:
self.log.warn(repsResult['Message'])
return repsResult
if printOutput:
fields = ['LFN', 'StorageElement', 'URL']
records = []
for lfn in repsResult['Value']['Successful']:
lfnPrint = lfn
for se, url in repsResult['Value']['Successful'][lfn].iteritems():
records.append((lfnPrint, se, url))
lfnPrint = ''
for lfn in repsResult['Value']['Failed']:
records.append((lfn, 'Unknown', str(repsResult['Value']['Failed'][lfn])))
printTable(fields, records, numbering=False)
return repsResult
def getReplicasForJobs(self, lfns, diskOnly=False, printOutput=False):
"""Obtain replica information from file catalogue client. Input LFN(s) can be string or list.
Example usage:
>>> print dirac.getReplicasForJobs('/lhcb/data/CCRC08/RDST/00000106/0000/00000106_00006321_1.rdst')
{'OK': True, 'Value': {'Successful': {'/lhcb/data/CCRC08/RDST/00000106/0000/00000106_00006321_1.rdst':
{'CERN-RDST':
'srm://srm-lhcb.cern.ch/castor/cern.ch/grid/lhcb/data/CCRC08/RDST/00000106/0000/00000106_00006321_1.rdst'}},
'Failed': {}}}
:param lfns: Logical File Name(s) to query
:type lfns: LFN str or python:list []
:param diskOnly: restrict to only disk replicas if True
:type diskOnly: boolean
:param printOutput: Optional flag to print result
:type printOutput: boolean
:returns: S_OK,S_ERROR
"""
ret = self._checkFileArgument(lfns, 'LFN')
if not ret['OK']:
return ret
lfns = ret['Value']
start = time.time()
dm = DataManager()
repsResult = dm.getReplicasForJobs(lfns, diskOnly=diskOnly)
timing = time.time() - start
self.log.info('Replica Lookup Time: %.2f seconds ' % (timing))
self.log.debug(repsResult)
if not repsResult['OK']:
self.log.warn(repsResult['Message'])
return repsResult
if printOutput:
fields = ['LFN', 'StorageElement', 'URL']
records = []
for lfn in repsResult['Value']['Successful']:
lfnPrint = lfn
for se, url in repsResult['Value']['Successful'][lfn].iteritems():
records.append((lfnPrint, se, url))
lfnPrint = ''
for lfn in repsResult['Value']['Failed']:
records.append((lfn, 'Unknown', str(repsResult['Value']['Failed'][lfn])))
printTable(fields, records, numbering=False)
return repsResult
#############################################################################
def getAllReplicas(self, lfns, printOutput=False):
"""Only differs from getReplicas method in the sense that replicas on banned SEs
will be included in the result.
Obtain replica information from file catalogue client. Input LFN(s) can be string or list.
Example usage:
>>> print dirac.getAllReplicas('/lhcb/data/CCRC08/RDST/00000106/0000/00000106_00006321_1.rdst')
{'OK': True, 'Value': {'Successful': {'/lhcb/data/CCRC08/RDST/00000106/0000/00000106_00006321_1.rdst':
{'CERN-RDST':
'srm://srm-lhcb.cern.ch/castor/cern.ch/grid/lhcb/data/CCRC08/RDST/00000106/0000/00000106_00006321_1.rdst'}},
'Failed': {}}}
:param lfns: Logical File Name(s) to query
:type lfns: LFN str or python:list
:param printOutput: Optional flag to print result
:type printOutput: bool
:returns: S_OK,S_ERROR
"""
ret = self._checkFileArgument(lfns, 'LFN')
if not ret['OK']:
return ret
lfns = ret['Value']
# rm = ReplicaManager()
# start = time.time()
# repsResult = rm.getCatalogReplicas( lfns )
# RF_NOTE : this method will return different values that api.getReplicas
fc = FileCatalog()
start = time.time()
repsResult = fc.getReplicas(lfns)
timing = time.time() - start
self.log.info('Replica Lookup Time: %.2f seconds ' % (timing))
self.log.verbose(repsResult)
if not repsResult['OK']:
self.log.warn(repsResult['Message'])
return repsResult
if printOutput:
print self.pPrint.pformat(repsResult['Value'])
return repsResult
def checkSEAccess(self, se, access='Write'):
""" returns the value of a certain SE status flag (access or other)
:param se: Storage Element name
:type se: string
:param access: type of access
:type access: string in ('Read', 'Write', 'Remove', 'Check')
        :returns: True or False
"""
return StorageElement(se, vo=self.vo).status().get(access, False)
#############################################################################
def splitInputData(self, lfns, maxFilesPerJob=20, printOutput=False):
"""Split the supplied lfn list by the replicas present at the possible
destination sites. An S_OK object will be returned containing a list of
lists in order to create the jobs.
Example usage:
>>> d.splitInputData(lfns,10)
{'OK': True, 'Value': [['<LFN>'], ['<LFN>']]}
:param lfns: Logical File Name(s) to split
:type lfns: python:list
:param maxFilesPerJob: Number of files per bunch
:type maxFilesPerJob: integer
:param printOutput: Optional flag to print result
:type printOutput: boolean
:returns: S_OK,S_ERROR
"""
from DIRAC.Core.Utilities.SiteSEMapping import getSitesForSE
sitesForSE = {}
ret = self._checkFileArgument(lfns, 'LFN')
if not ret['OK']:
return ret
lfns = ret['Value']
if not isinstance(maxFilesPerJob, (int, long)):
try:
maxFilesPerJob = int(maxFilesPerJob)
except Exception as x:
return self._errorReport(str(x), 'Expected integer for maxFilesPerJob')
replicaDict = self.getReplicasForJobs(lfns)
if not replicaDict['OK']:
return replicaDict
if not replicaDict['Value']['Successful']:
return self._errorReport(replicaDict['Value']['Failed'].items()[0], 'Failed to get replica information')
siteLfns = {}
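    # Group LFNs by the sorted set of candidate sites; SE-to-sites lookups are cached in sitesForSE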
for lfn, reps in replicaDict['Value']['Successful'].iteritems():
possibleSites = set(site for se in reps for site in (
sitesForSE[se] if se in sitesForSE else sitesForSE.setdefault(se, getSitesForSE(se).get('Value', []))))
siteLfns.setdefault(','.join(sorted(possibleSites)), []).append(lfn)
if '' in siteLfns:
# Some files don't have active replicas
return self._errorReport('No active replica found for', str(siteLfns['']))
lfnGroups = []
for files in siteLfns.values():
lists = breakListIntoChunks(files, maxFilesPerJob)
lfnGroups += lists
if printOutput:
print self.pPrint.pformat(lfnGroups)
return S_OK(lfnGroups)
#############################################################################
def getMetadata(self, lfns, printOutput=False):
return self.getLfnMetadata(lfns, printOutput=printOutput)
def getLfnMetadata(self, lfns, printOutput=False):
"""Obtain replica metadata from file catalogue client. Input LFN(s) can be string or list.
Example usage:
>>> print dirac.getMetadata('/lhcb/data/CCRC08/RDST/00000106/0000/00000106_00006321_1.rdst')
{'OK': True, 'Value': {'Successful': {'/lhcb/data/CCRC08/RDST/00000106/0000/00000106_00006321_1.rdst':
{'Status': '-', 'Size': 619475828L, 'GUID': 'E871FBA6-71EA-DC11-8F0C-000E0C4DEB4B', 'ChecksumType': 'AD',
'CheckSumValue': ''}}, 'Failed': {}}}
:param lfns: Logical File Name(s) to query
:type lfns: LFN str or python:list []
:param printOutput: Optional flag to print result
:type printOutput: boolean
:returns: S_OK,S_ERROR
"""
ret = self._checkFileArgument(lfns, 'LFN')
if not ret['OK']:
return ret
lfns = ret['Value']
fc = FileCatalog()
start = time.time()
repsResult = fc.getFileMetadata(lfns)
timing = time.time() - start
self.log.info('Metadata Lookup Time: %.2f seconds ' % (timing))
self.log.verbose(repsResult)
if not repsResult['OK']:
self.log.warn('Failed to retrieve file metadata from the catalogue')
self.log.warn(repsResult['Message'])
return repsResult
if printOutput:
print self.pPrint.pformat(repsResult['Value'])
return repsResult
#############################################################################
def addFile(self, lfn, fullPath, diracSE, fileGuid=None, printOutput=False):
"""Add a single file to Grid storage. lfn is the desired logical file name
for the file, fullPath is the local path to the file and diracSE is the
Storage Element name for the upload. The fileGuid is optional, if not
specified a GUID will be generated on the fly. If subsequent access
       depends on the file GUID the correct one should be specified.
Example Usage:
>>> print dirac.addFile('/lhcb/user/p/paterson/myFile.tar.gz','myFile.tar.gz','CERN-USER')
{'OK': True, 'Value':{'Failed': {},
'Successful': {'/lhcb/user/p/paterson/test/myFile.tar.gz': {'put': 64.246301889419556,
'register': 1.1102778911590576}}}}
:param lfn: Logical File Name (LFN)
:type lfn: string
:param diracSE: DIRAC SE name e.g. CERN-USER
:type diracSE: string
:param printOutput: Optional flag to print result
:type printOutput: boolean
:returns: S_OK,S_ERROR
"""
ret = self._checkFileArgument(lfn, 'LFN', single=True)
if not ret['OK']:
return ret
lfn = ret['Value']
if not os.path.exists(fullPath):
return self._errorReport('Local file %s does not exist' % (fullPath))
if not os.path.isfile(fullPath):
return self._errorReport('Expected path to file not %s' % (fullPath))
dm = DataManager(catalogs=self.defaultFileCatalog)
result = dm.putAndRegister(lfn, fullPath, diracSE, guid=fileGuid)
if not result['OK']:
return self._errorReport('Problem during putAndRegister call', result['Message'])
if printOutput:
print self.pPrint.pformat(result['Value'])
return result
#############################################################################
def getFile(self, lfn, destDir='', printOutput=False):
"""Retrieve a single file or list of files from Grid storage to the current directory. lfn is the
desired logical file name for the file, fullPath is the local path to the file and diracSE is the
Storage Element name for the upload. The fileGuid is optional, if not specified a GUID will be
generated on the fly.
Example Usage:
>>> print dirac.getFile('/lhcb/user/p/paterson/myFile.tar.gz')
{'OK': True, 'Value':{'Failed': {},
'Successful': {'/lhcb/user/p/paterson/test/myFile.tar.gz': '/afs/cern.ch/user/p/paterson/myFile.tar.gz'}}}
:param lfn: Logical File Name (LFN)
:type lfn: string
:param printOutput: Optional flag to print result
:type printOutput: boolean
:returns: S_OK,S_ERROR
"""
ret = self._checkFileArgument(lfn, 'LFN')
if not ret['OK']:
return ret
lfn = ret['Value']
dm = DataManager()
result = dm.getFile(lfn, destinationDir=destDir)
if not result['OK']:
return self._errorReport('Problem during getFile call', result['Message'])
if result['Value']['Failed']:
      self.log.error('Failures occurred during dm.getFile')
if printOutput:
print self.pPrint.pformat(result['Value'])
return S_ERROR(result['Value'])
if printOutput:
print self.pPrint.pformat(result['Value'])
return result
#############################################################################
def replicateFile(self, lfn, destinationSE, sourceSE='', localCache='', printOutput=False):
"""Replicate an existing file to another Grid SE. lfn is the desired logical file name
for the file to be replicated, destinationSE is the DIRAC Storage Element to create a
replica of the file at. Optionally the source storage element and local cache for storing
the retrieved file for the new upload can be specified.
Example Usage:
>>> print dirac.replicateFile('/lhcb/user/p/paterson/myFile.tar.gz','CNAF-USER')
{'OK': True, 'Value':{'Failed': {},
'Successful': {'/lhcb/user/p/paterson/test/myFile.tar.gz': {'register': 0.44766902923583984,
'replicate': 56.42345404624939}}}}
:param lfn: Logical File Name (LFN)
:type lfn: string
:param destinationSE: Destination DIRAC SE name e.g. CERN-USER
:type destinationSE: string
:param sourceSE: Optional source SE
:type sourceSE: string
:param localCache: Optional path to local cache
:type localCache: string
:param printOutput: Optional flag to print result
:type printOutput: boolean
:returns: S_OK,S_ERROR
"""
ret = self._checkFileArgument(lfn, 'LFN', single=True)
if not ret['OK']:
return ret
lfn = ret['Value']
if not sourceSE:
sourceSE = ''
if not localCache:
localCache = ''
if not isinstance(sourceSE, basestring):
return self._errorReport('Expected string for source SE name')
if not isinstance(localCache, basestring):
return self._errorReport('Expected string for path to local cache')
localFile = os.path.join(localCache, os.path.basename(lfn))
if os.path.exists(localFile):
return self._errorReport('A local file "%s" with the same name as the remote file exists. '
'Cannot proceed with replication:\n'
' Go to a different working directory\n'
                               '      Move it to a different directory or use a different localCache\n'
' Delete the file yourself'
'' % localFile)
dm = DataManager()
result = dm.replicateAndRegister(lfn, destinationSE, sourceSE, '', localCache)
if not result['OK']:
return self._errorReport('Problem during replicateFile call', result['Message'])
if printOutput:
print self.pPrint.pformat(result['Value'])
return result
def replicate(self, lfn, destinationSE, sourceSE='', printOutput=False):
"""Replicate an existing file to another Grid SE. lfn is the desired logical file name
for the file to be replicated, destinationSE is the DIRAC Storage Element to create a
       replica of the file at. Optionally the source storage element can be specified.
Example Usage:
>>> print dirac.replicate('/lhcb/user/p/paterson/myFile.tar.gz','CNAF-USER')
{'OK': True, 'Value':{'Failed': {},
'Successful': {'/lhcb/user/p/paterson/test/myFile.tar.gz': {'register': 0.44766902923583984}}}}
:param lfn: Logical File Name (LFN)
:type lfn: string
:param destinationSE: Destination DIRAC SE name e.g. CERN-USER
:type destinationSE: string
:param sourceSE: Optional source SE
:type sourceSE: string
:param printOutput: Optional flag to print result
:type printOutput: boolean
:returns: S_OK,S_ERROR
"""
ret = self._checkFileArgument(lfn, 'LFN', single=True)
if not ret['OK']:
return ret
lfn = ret['Value']
if not sourceSE:
sourceSE = ''
if not isinstance(sourceSE, basestring):
return self._errorReport('Expected string for source SE name')
dm = DataManager()
result = dm.replicate(lfn, destinationSE, sourceSE, '')
if not result['OK']:
return self._errorReport('Problem during replicate call', result['Message'])
if printOutput:
print self.pPrint.pformat(result['Value'])
return result
#############################################################################
def getAccessURL(self, lfn, storageElement, printOutput=False, protocol=False):
"""Allows to retrieve an access URL for an LFN replica given a valid DIRAC SE
name. Contacts the file catalog and contacts the site SRM endpoint behind
the scenes.
Example Usage:
>>> print dirac.getAccessURL('/lhcb/data/CCRC08/DST/00000151/0000/00000151_00004848_2.dst','CERN-RAW')
{'OK': True, 'Value': {'Successful': {'srm://...': {'SRM2': 'rfio://...'}}, 'Failed': {}}}
:param lfn: Logical File Name (LFN)
:type lfn: str or python:list
:param storageElement: DIRAC SE name e.g. CERN-RAW
:type storageElement: string
:param printOutput: Optional flag to print result
:type printOutput: boolean
:param protocol: protocol requested
:type protocol: str or python:list
:returns: S_OK,S_ERROR
"""
ret = self._checkFileArgument(lfn, 'LFN')
if not ret['OK']:
return ret
lfn = ret['Value']
dm = DataManager()
result = dm.getReplicaAccessUrl(lfn, storageElement, protocol=protocol)
if not result['OK']:
return self._errorReport('Problem during getAccessURL call', result['Message'])
if printOutput:
print self.pPrint.pformat(result['Value'])
return result
#############################################################################
def getPhysicalFileAccessURL(self, pfn, storageElement, printOutput=False):
"""Allows to retrieve an access URL for an PFN given a valid DIRAC SE
name. The SE is contacted directly for this information.
Example Usage:
>>> print dirac.getPhysicalFileAccessURL('srm://srm-lhcb.cern.ch/castor/cern.ch/grid/lhcb/data/CCRC08/DST/00000151/0000/00000151_00004848_2.dst','CERN_M-DST')
{'OK': True, 'Value':{'Failed': {},
'Successful': {'srm://srm-lhcb.cern.ch/castor/cern.ch/grid/lhcb/data/CCRC08/DST/00000151/0000/00000151_00004848_2.dst': {'RFIO': 'castor://...'}}}}
:param pfn: Physical File Name (PFN)
:type pfn: str or python:list
:param storageElement: DIRAC SE name e.g. CERN-RAW
:type storageElement: string
:param printOutput: Optional flag to print result
:type printOutput: boolean
:returns: S_OK,S_ERROR
"""
ret = self._checkFileArgument(pfn, 'PFN')
if not ret['OK']:
return ret
pfn = ret['Value']
result = StorageElement(storageElement).getURL([pfn])
if not result['OK']:
return self._errorReport('Problem during getAccessURL call', result['Message'])
if printOutput:
print self.pPrint.pformat(result['Value'])
return result
#############################################################################
def getPhysicalFileMetadata(self, pfn, storageElement, printOutput=False):
"""Allows to retrieve metadata for physical file(s) on a supplied storage
element. Contacts the site SRM endpoint and performs a gfal_ls behind
the scenes.
Example Usage:
>>> print dirac.getPhysicalFileMetadata('srm://srm.grid.sara.nl/pnfs/grid.sara.nl/data
/lhcb/data/CCRC08/RAW/LHCb/CCRC/23341/023341_0000039571.raw','NIKHEF-RAW')
{'OK': True, 'Value': {'Successful': {'srm://...': {'SRM2': 'rfio://...'}}, 'Failed': {}}}
:param pfn: Physical File Name (PFN)
:type pfn: str or python:list
:param storageElement: DIRAC SE name e.g. CERN-RAW
:type storageElement: string
:param printOutput: Optional flag to print result
:type printOutput: boolean
:returns: S_OK,S_ERROR
"""
ret = self._checkFileArgument(pfn, 'PFN')
if not ret['OK']:
return ret
pfn = ret['Value']
result = StorageElement(storageElement).getFileMetadata(pfn)
if not result['OK']:
return self._errorReport('Problem during getStorageFileMetadata call', result['Message'])
if printOutput:
print self.pPrint.pformat(result['Value'])
return result
#############################################################################
def removeFile(self, lfn, printOutput=False):
"""Remove LFN and *all* associated replicas from Grid Storage Elements and
file catalogues.
Example Usage:
>>> print dirac.removeFile('LFN:/lhcb/data/CCRC08/RAW/LHCb/CCRC/22808/022808_0000018443.raw')
{'OK': True, 'Value':...}
:param lfn: Logical File Name (LFN)
:type lfn: string
:param printOutput: Flag to print to stdOut
:type printOutput: Boolean
:returns: S_OK,S_ERROR
"""
ret = self._checkFileArgument(lfn, 'LFN')
if not ret['OK']:
return ret
lfn = ret['Value']
dm = DataManager()
result = dm.removeFile(lfn)
if printOutput and result['OK']:
print self.pPrint.pformat(result['Value'])
return result
#############################################################################
def removeReplica(self, lfn, storageElement, printOutput=False):
"""Remove replica of LFN from specified Grid Storage Element and
file catalogues.
Example Usage:
>>> print dirac.removeReplica('LFN:/lhcb/user/p/paterson/myDST.dst','CERN-USER')
{'OK': True, 'Value':...}
:param lfn: Logical File Name (LFN)
:type lfn: string
:param storageElement: DIRAC SE Name
:type storageElement: string
:returns: S_OK,S_ERROR
"""
ret = self._checkFileArgument(lfn, 'LFN')
if not ret['OK']:
return ret
lfn = ret['Value']
dm = DataManager()
result = dm.removeReplica(storageElement, lfn)
if printOutput and result['OK']:
print self.pPrint.pformat(result['Value'])
return result
#############################################################################
def getInputSandbox(self, jobID, outputDir=None):
"""Retrieve input sandbox for existing JobID.
This method allows the retrieval of an existing job input sandbox for
debugging purposes. By default the sandbox is downloaded to the current
    directory but this can be overridden via the outputDir parameter. All files
are extracted into a InputSandbox<JOBID> directory that is automatically created.
Example Usage:
>>> print dirac.getInputSandbox(12345)
{'OK': True, 'Value': ['Job__Sandbox__.tar.bz2']}
:param jobID: JobID
:type jobID: integer or string
:param outputDir: Optional directory for files
:type outputDir: string
:returns: S_OK,S_ERROR
"""
ret = self._checkJobArgument(jobID, multiple=False)
if not ret['OK']:
return ret
jobID = ret['Value']
# TODO: Do not check if dir already exists
dirPath = ''
if outputDir:
dirPath = '%s/InputSandbox%s' % (outputDir, jobID)
if os.path.exists(dirPath):
return self._errorReport('Job input sandbox directory %s already exists' % (dirPath))
else:
dirPath = '%s/InputSandbox%s' % (os.getcwd(), jobID)
if os.path.exists(dirPath):
return self._errorReport('Job input sandbox directory %s already exists' % (dirPath))
try:
os.mkdir(dirPath)
except Exception as x:
return self._errorReport(str(x), 'Could not create directory in %s' % (dirPath))
result = SandboxStoreClient(useCertificates=self.useCertificates).downloadSandboxForJob(jobID, 'Input', dirPath)
if not result['OK']:
self.log.warn(result['Message'])
else:
self.log.info('Files retrieved and extracted in %s' % (dirPath))
return result
#############################################################################
def getOutputSandbox(self, jobID, outputDir=None, oversized=True, noJobDir=False):
"""Retrieve output sandbox for existing JobID.
This method allows the retrieval of an existing job output sandbox.
By default the sandbox is downloaded to the current directory but
    this can be overridden via the outputDir parameter. All files are
extracted into a <JOBID> directory that is automatically created.
Example Usage:
>>> print dirac.getOutputSandbox(12345)
{'OK': True, 'Value': ['Job__Sandbox__.tar.bz2']}
:param jobID: JobID
:type jobID: integer or string
:param outputDir: Optional directory path
:type outputDir: string
:param oversized: Optionally disable oversized sandbox download
:type oversized: boolean
:returns: S_OK,S_ERROR
"""
ret = self._checkJobArgument(jobID, multiple=False)
if not ret['OK']:
return ret
jobID = ret['Value']
dirPath = ''
if outputDir:
dirPath = outputDir
if not noJobDir:
dirPath = '%s/%s' % (outputDir, jobID)
else:
dirPath = '%s/%s' % (os.getcwd(), jobID)
if os.path.exists(dirPath):
return self._errorReport('Job output directory %s already exists' % (dirPath))
mkDir(dirPath)
# New download
result = SandboxStoreClient(useCertificates=self.useCertificates).downloadSandboxForJob(jobID, 'Output', dirPath)
if result['OK']:
self.log.info('Files retrieved and extracted in %s' % (dirPath))
if self.jobRepo:
self.jobRepo.updateJob(jobID, {'Retrieved': 1, 'Sandbox': os.path.realpath(dirPath)})
return result
self.log.warn(result['Message'])
if not oversized:
if self.jobRepo:
self.jobRepo.updateJob(jobID, {'Retrieved': 1, 'Sandbox': os.path.realpath(dirPath)})
return result
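    # The output sandbox may have been declared oversized and uploaded to Grid storage;
    # look up its LFN among the job parameters and download it with getFile()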
params = self.parameters(int(jobID))
if not params['OK']:
self.log.verbose('Could not retrieve job parameters to check for oversized sandbox')
return params
if not params['Value'].get('OutputSandboxLFN'):
self.log.verbose('No oversized output sandbox for job %s:\n%s' % (jobID, params))
return result
oversizedSandbox = params['Value']['OutputSandboxLFN']
if not oversizedSandbox:
self.log.verbose('Null OutputSandboxLFN for job %s' % jobID)
return result
self.log.info('Attempting to retrieve %s' % oversizedSandbox)
start = os.getcwd()
os.chdir(dirPath)
getFile = self.getFile(oversizedSandbox)
if not getFile['OK']:
self.log.warn('Failed to download %s with error:%s' % (oversizedSandbox, getFile['Message']))
os.chdir(start)
return getFile
fileName = os.path.basename(oversizedSandbox)
result = S_OK()
if tarfile.is_tarfile(fileName):
try:
with tarfile.open(fileName, 'r') as tf:
for member in tf.getmembers():
tf.extract(member, dirPath)
except Exception as x:
os.chdir(start)
result = S_ERROR(str(x))
if os.path.exists(fileName):
os.unlink(fileName)
os.chdir(start)
if result['OK']:
if self.jobRepo:
self.jobRepo.updateJob(jobID, {'Retrieved': 1, 'Sandbox': os.path.realpath(dirPath)})
return result
#############################################################################
def delete(self, jobID):
return self.deleteJob(jobID)
def deleteJob(self, jobID):
"""Delete job or list of jobs from the WMS, if running these jobs will
also be killed.
Example Usage:
>>> print dirac.delete(12345)
{'OK': True, 'Value': [12345]}
:param jobID: JobID
:type jobID: int, str or python:list
:returns: S_OK,S_ERROR
"""
ret = self._checkJobArgument(jobID, multiple=True)
if not ret['OK']:
return ret
jobID = ret['Value']
result = WMSClient().deleteJob(jobID)
if result['OK']:
if self.jobRepo:
for jobID in result['Value']:
self.jobRepo.removeJob(jobID)
return result
#############################################################################
def reschedule(self, jobID):
return self.rescheduleJob(jobID)
def rescheduleJob(self, jobID):
"""Reschedule a job or list of jobs in the WMS. This operation is the same
as resubmitting the same job as new. The rescheduling operation may be
       performed up to a configurable maximum number of times, but the owner of a job
can also reset this counter and reschedule jobs again by hand.
Example Usage:
>>> print dirac.reschedule(12345)
{'OK': True, 'Value': [12345]}
:param jobID: JobID
:type jobID: int, str or python:list
:returns: S_OK,S_ERROR
"""
ret = self._checkJobArgument(jobID, multiple=True)
if not ret['OK']:
return ret
jobID = ret['Value']
result = WMSClient().rescheduleJob(jobID)
if result['OK']:
if self.jobRepo:
repoDict = {}
for jobID in result['Value']:
repoDict[jobID] = {'State': 'Submitted'}
self.jobRepo.updateJobs(repoDict)
return result
def kill(self, jobID):
return self.killJob(jobID)
def killJob(self, jobID):
"""Issue a kill signal to a running job. If a job has already completed this
action is harmless but otherwise the process will be killed on the compute
resource by the Watchdog.
Example Usage:
>>> print dirac.kill(12345)
{'OK': True, 'Value': [12345]}
:param jobID: JobID
:type jobID: int, str or python:list
:returns: S_OK,S_ERROR
"""
ret = self._checkJobArgument(jobID, multiple=True)
if not ret['OK']:
return ret
jobID = ret['Value']
result = WMSClient().killJob(jobID)
if result['OK']:
if self.jobRepo:
for jobID in result['Value']:
self.jobRepo.removeJob(jobID)
return result
#############################################################################
def status(self, jobID):
return self.getJobStatus(jobID)
def getJobStatus(self, jobID):
"""Monitor the status of DIRAC Jobs.
Example Usage:
>>> print dirac.status(79241)
{79241: {'status': 'Done', 'site': 'LCG.CERN.ch'}}
:param jobID: JobID
:type jobID: int, str or python:list
:returns: S_OK,S_ERROR
"""
ret = self._checkJobArgument(jobID, multiple=True)
if not ret['OK']:
return ret
jobID = ret['Value']
monitoring = RPCClient('WorkloadManagement/JobMonitoring')
statusDict = monitoring.getJobsStatus(jobID)
minorStatusDict = monitoring.getJobsMinorStatus(jobID)
siteDict = monitoring.getJobsSites(jobID)
if not statusDict['OK']:
self.log.warn('Could not obtain job status information')
return statusDict
if not siteDict['OK']:
self.log.warn('Could not obtain job site information')
return siteDict
if not minorStatusDict['OK']:
self.log.warn('Could not obtain job minor status information')
return minorStatusDict
result = {}
repoDict = {}
for job, vals in statusDict['Value'].iteritems():
result[job] = vals
if self.jobRepo:
repoDict[job] = {'State': vals['Status']}
if self.jobRepo:
self.jobRepo.updateJobs(repoDict)
for job, vals in siteDict['Value'].iteritems():
result[job].update(vals)
for job, vals in minorStatusDict['Value'].iteritems():
result[job].update(vals)
for job in result:
result[job].pop('JobID', None)
return S_OK(result)
#############################################################################
def getJobInputData(self, jobID):
"""Retrieve the input data requirement of any job existing in the workload management
system.
Example Usage:
>>> dirac.getJobInputData(1405)
{'OK': True, 'Value': {1405:
['LFN:/lhcb/production/DC06/phys-v2-lumi5/00001680/DST/0000/00001680_00000490_5.dst']}}
:param jobID: JobID
:type jobID: int, str or python:list
:returns: S_OK,S_ERROR
"""
ret = self._checkJobArgument(jobID, multiple=True)
if not ret['OK']:
return ret
jobID = ret['Value']
summary = {}
monitoring = RPCClient('WorkloadManagement/JobMonitoring')
for job in jobID:
result = monitoring.getInputData(job)
if result['OK']:
summary[job] = result['Value']
else:
self.log.warn('Getting input data for job %s failed with message:\n%s' % (job, result['Message']))
summary[job] = []
return S_OK(summary)
#############################################################################
def getJobOutputLFNs(self, jobID):
""" Retrieve the output data LFNs of a given job locally.
This does not download the output files but simply returns the LFN list
that a given job has produced.
Example Usage:
>>> dirac.getJobOutputLFNs(1405)
{'OK':True,'Value':[<LFN>]}
:param jobID: JobID
:type jobID: int or string
:returns: S_OK,S_ERROR
"""
try:
jobID = int(jobID)
except Exception as x:
return self._errorReport(str(x), 'Expected integer or string for existing jobID')
result = self.parameters(jobID)
if not result['OK']:
return result
if not result['Value'].get('UploadedOutputData'):
self.log.info('Parameters for job %s do not contain uploaded output data:\n%s' % (jobID, result))
return S_ERROR('No output data found for job %s' % jobID)
outputData = result['Value']['UploadedOutputData']
outputData = outputData.replace(' ', '').split(',')
if not outputData:
return S_ERROR('No output data files found')
self.log.verbose('Found the following output data LFNs:\n', '\n'.join(outputData))
return S_OK(outputData)
#############################################################################
def getJobOutputData(self, jobID, outputFiles='', destinationDir=''):
""" Retrieve the output data files of a given job locally.
Optionally restrict the download of output data to a given file name or
list of files using the outputFiles option, by default all job outputs
will be downloaded.
Example Usage:
>>> dirac.getJobOutputData(1405)
{'OK':True,'Value':[<LFN>]}
:param jobID: JobID
:type jobID: int or string
:param outputFiles: Optional files to download
:type outputFiles: str or python:list
:returns: S_OK,S_ERROR
"""
try:
jobID = int(jobID)
except Exception as x:
return self._errorReport(str(x), 'Expected integer or string for existing jobID')
result = self.parameters(jobID)
if not result['OK']:
return result
if not result['Value'].get('UploadedOutputData'):
self.log.info('Parameters for job %s do not contain uploaded output data:\n%s' % (jobID, result))
return S_ERROR('No output data found for job %s' % jobID)
outputData = result['Value']['UploadedOutputData']
outputData = outputData.replace(' ', '').split(',')
if not outputData:
return S_ERROR('No output data files found to download')
if outputFiles:
if isinstance(outputFiles, basestring):
outputFiles = [os.path.basename(outputFiles)]
elif isinstance(outputFiles, list):
try:
outputFiles = [os.path.basename(fname) for fname in outputFiles]
except Exception as x:
return self._errorReport(str(x), 'Expected strings for output file names')
else:
return self._errorReport('Expected strings for output file names')
self.log.info('Found specific outputFiles to download:', ', '.join(outputFiles))
newOutputData = []
for outputFile in outputData:
if os.path.basename(outputFile) in outputFiles:
newOutputData.append(outputFile)
self.log.verbose('%s will be downloaded' % outputFile)
else:
self.log.verbose('%s will be ignored' % outputFile)
outputData = newOutputData
# These two lines will break backwards compatibility.
# if not destinationDir:
# destinationDir = jobID
obtainedFiles = []
for outputFile in outputData:
self.log.info('Attempting to retrieve %s' % outputFile)
result = self.getFile(outputFile, destDir=destinationDir)
if not result['OK']:
self.log.error('Failed to download %s' % outputFile)
return result
else:
localPath = "%s/%s" % (destinationDir, os.path.basename(outputFile))
obtainedFiles.append(os.path.realpath(localPath))
if self.jobRepo:
self.jobRepo.updateJob(jobID, {'OutputData': 1, 'OutputFiles': obtainedFiles})
return S_OK(outputData)
#############################################################################
def selectJobs(self, status=None, minorStatus=None, applicationStatus=None,
site=None, owner=None, ownerGroup=None, jobGroup=None, date=None,
printErrors=True):
"""Options correspond to the web-page table columns. Returns the list of JobIDs for
the specified conditions. A few notes on the formatting:
- date must be specified as yyyy-mm-dd. By default, the date is today.
- jobGroup corresponds to the name associated to a group of jobs, e.g. productionID / job names.
- site is the DIRAC site name, e.g. LCG.CERN.ch
- owner is the immutable nickname, e.g. paterson
Example Usage:
>>> dirac.selectJobs( status='Failed', owner='paterson', site='LCG.CERN.ch')
{'OK': True, 'Value': ['25020', '25023', '25026', '25027', '25040']}
:param status: Job status
:type status: string
:param minorStatus: Job minor status
:type minorStatus: string
:param applicationStatus: Job application status
:type applicationStatus: string
:param site: Job execution site
:type site: string
:param owner: Job owner
:type owner: string
:param jobGroup: Job group
:type jobGroup: string
:param date: Selection date
:type date: string
:returns: S_OK,S_ERROR
"""
options = {'Status': status, 'MinorStatus': minorStatus, 'ApplicationStatus': applicationStatus, 'Owner': owner,
'Site': site, 'JobGroup': jobGroup, 'OwnerGroup': ownerGroup}
conditions = dict((key, str(value)) for key, value in options.iteritems() if value)
if date:
try:
date = str(date)
except Exception as x:
return self._errorReport(str(x), 'Expected yyyy-mm-dd string for date')
else:
date = '%s' % Time.date()
self.log.verbose('Setting date to %s' % (date))
self.log.verbose('Will select jobs with last update %s and following conditions' % date)
self.log.verbose(self.pPrint.pformat(conditions))
monitoring = RPCClient('WorkloadManagement/JobMonitoring')
result = monitoring.getJobs(conditions, date)
if not result['OK']:
if printErrors:
self.log.warn(result['Message'])
jobIDs = result['Value']
self.log.verbose('%s job(s) selected' % (len(jobIDs)))
if not printErrors:
return result
if not jobIDs:
self.log.error("No jobs selected", "with date '%s' for conditions: %s" % (str(date), conditions))
return S_ERROR("No jobs selected")
return result
#############################################################################
def getJobSummary(self, jobID, outputFile=None, printOutput=False):
"""Output similar to the web page can be printed to the screen
or stored as a file or just returned as a dictionary for further usage.
Jobs can be specified individually or as a list.
Example Usage:
>>> dirac.getJobSummary(959209)
{'OK': True, 'Value': {959209: {'Status': 'Staging', 'LastUpdateTime': '2008-12-08 16:43:18',
'MinorStatus': '28 / 30', 'Site': 'Unknown', 'HeartBeatTime': 'None', 'ApplicationStatus': 'unknown',
'JobGroup': '00003403', 'Owner': 'joel', 'SubmissionTime': '2008-12-08 16:41:38'}}}
:param jobID: JobID
:type jobID: int or string
:param outputFile: Optional output file
:type outputFile: string
:param printOutput: Flag to print to stdOut
:type printOutput: Boolean
:returns: S_OK,S_ERROR
"""
ret = self._checkJobArgument(jobID, multiple=True)
if not ret['OK']:
return ret
jobID = ret['Value']
headers = ['Status', 'MinorStatus', 'ApplicationStatus', 'Site', 'JobGroup', 'LastUpdateTime',
'HeartBeatTime', 'SubmissionTime', 'Owner']
monitoring = RPCClient('WorkloadManagement/JobMonitoring')
result = monitoring.getJobsSummary(jobID)
if not result['OK']:
self.log.warn(result['Message'])
return result
try:
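      # the monitoring service returns the summary as the string representation of a dict; eval() converts it back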
jobSummary = eval(result['Value'])
# self.log.info(self.pPrint.pformat(jobSummary))
except Exception as x:
self.log.warn('Problem interpreting result from job monitoring service')
return S_ERROR('Problem while converting result from job monitoring')
summary = {}
for job in jobID:
summary[job] = {}
for key in headers:
if job not in jobSummary:
self.log.warn('No records for JobID %s' % job)
value = jobSummary.get(job, {}).get(key, 'None')
summary[job][key] = value
if outputFile:
if os.path.exists(outputFile):
return self._errorReport('Output file %s already exists' % (outputFile))
      dirPath = os.path.dirname(outputFile)
      if dirPath and not os.path.exists(dirPath):
try:
os.mkdir(dirPath)
except Exception as x:
return self._errorReport(str(x), 'Could not create directory %s' % (dirPath))
with open(outputFile, 'w') as fopen:
line = 'JobID'.ljust(12)
for i in headers:
line += i.ljust(35)
fopen.write(line + '\n')
for jobID, params in summary.iteritems():
line = str(jobID).ljust(12)
for header in headers:
for key, value in params.iteritems():
if header == key:
line += value.ljust(35)
fopen.write(line + '\n')
self.log.verbose('Output written to %s' % outputFile)
if printOutput:
print self.pPrint.pformat(summary)
return S_OK(summary)
#############################################################################
def getJobDebugOutput(self, jobID):
"""Developer function. Try to retrieve all possible outputs including
logging information, job parameters, sandbox outputs, pilot outputs,
last heartbeat standard output, JDL and CPU profile.
Example Usage:
>>> dirac.getJobDebugOutput(959209)
{'OK': True, 'Value': '/afs/cern.ch/user/p/paterson/DEBUG_959209'}
:param jobID: JobID
:type jobID: int or string
:returns: S_OK,S_ERROR
"""
try:
jobID = int(jobID)
except Exception as x:
return self._errorReport(str(x), 'Expected integer or string for existing jobID')
result = self.status(jobID)
if not result['OK']:
self.log.info('Could not obtain status information for jobID %s, please check this is valid.' % jobID)
return S_ERROR('JobID %s not found in WMS' % jobID)
else:
self.log.info('Job %s' % result['Value'])
debugDir = '%s/DEBUG_%s' % (os.getcwd(), jobID)
try:
os.mkdir(debugDir)
except Exception as x:
return self._errorReport(str(x), 'Could not create directory in %s' % (debugDir))
try:
result = self.getOutputSandbox(jobID, '%s' % (debugDir))
msg = []
if not result['OK']:
msg.append('Output Sandbox: Retrieval Failed')
else:
msg.append('Output Sandbox: Retrieved')
except Exception as x:
msg.append('Output Sandbox: Not Available')
try:
result = self.getInputSandbox(jobID, '%s' % (debugDir))
if not result['OK']:
msg.append('Input Sandbox: Retrieval Failed')
else:
msg.append('Input Sandbox: Retrieved')
except Exception as x:
msg.append('Input Sandbox: Not Available')
try:
result = self.parameters(jobID)
if not result['OK']:
msg.append('Job Parameters: Retrieval Failed')
else:
self.__writeFile(result['Value'], '%s/JobParameters' % (debugDir))
msg.append('Job Parameters: Retrieved')
except Exception as x:
msg.append('Job Parameters: Not Available')
try:
result = self.peek(jobID)
if not result['OK']:
msg.append('Last Heartbeat StdOut: Retrieval Failed')
else:
self.__writeFile(result['Value'], '%s/LastHeartBeat' % (debugDir))
msg.append('Last Heartbeat StdOut: Retrieved')
except Exception as x:
msg.append('Last Heartbeat StdOut: Not Available')
try:
result = self.loggingInfo(jobID)
if not result['OK']:
msg.append('Logging Info: Retrieval Failed')
else:
self.__writeFile(result['Value'], '%s/LoggingInfo' % (debugDir))
msg.append('Logging Info: Retrieved')
except Exception as x:
msg.append('Logging Info: Not Available')
try:
result = self.getJobJDL(jobID)
if not result['OK']:
msg.append('Job JDL: Retrieval Failed')
else:
self.__writeFile(result['Value'], '%s/Job%s.jdl' % (debugDir, jobID))
msg.append('Job JDL: Retrieved')
except Exception as x:
msg.append('Job JDL: Not Available')
try:
result = self.getJobCPUTime(jobID)
if not result['OK']:
msg.append('CPU Profile: Retrieval Failed')
else:
self.__writeFile(result['Value'], '%s/JobCPUProfile' % (debugDir))
msg.append('CPU Profile: Retrieved')
except Exception as x:
msg.append('CPU Profile: Not Available')
self.log.info('Summary of debugging outputs for job %s retrieved in directory:\n%s\n' % (jobID, debugDir),
'\n'.join(msg))
return S_OK(debugDir)
#############################################################################
def __writeFile(self, pObject, fileName):
"""Internal function. Writes a python object to a specified file path.
"""
with open(fileName, 'w') as fopen:
if not isinstance(pObject, basestring):
fopen.write('%s\n' % self.pPrint.pformat(pObject))
else:
fopen.write(pObject)
#############################################################################
def getJobCPUTime(self, jobID, printOutput=False):
"""Retrieve job CPU consumed heartbeat data from job monitoring
service. Jobs can be specified individually or as a list.
The time stamps and raw CPU consumed (s) are returned (if available).
Example Usage:
>>> d.getJobCPUTime(959209)
{'OK': True, 'Value': {959209: {}}}
:param jobID: JobID
:type jobID: int or string
:param printOutput: Flag to print to stdOut
:type printOutput: Boolean
:returns: S_OK,S_ERROR
"""
ret = self._checkJobArgument(jobID, multiple=True)
if not ret['OK']:
return ret
jobID = ret['Value']
summary = {}
for job in jobID:
monitoring = RPCClient('WorkloadManagement/JobMonitoring')
result = monitoring.getJobHeartBeatData(job)
summary[job] = {}
if not result['OK']:
return self._errorReport(result['Message'], 'Could not get heartbeat data for job %s' % job)
if result['Value']:
tupleList = result['Value']
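        # heartbeat records are (parameter name, value, timestamp) tuples; keep only CPUConsumed values keyed by timestamp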
for tup in tupleList:
if tup[0] == 'CPUConsumed':
summary[job][tup[2]] = tup[1]
else:
self.log.warn('No heartbeat data for job %s' % job)
if printOutput:
print self.pPrint.pformat(summary)
return S_OK(summary)
#############################################################################
def attributes(self, jobID, printOutput=False):
return self.getJobAttributes(jobID, printOutput=printOutput)
def getJobAttributes(self, jobID, printOutput=False):
"""Return DIRAC attributes associated with the given job.
Each job will have certain attributes that affect the journey through the
workload management system, see example below. Attributes are optionally
printed to the screen.
Example Usage:
>>> print dirac.attributes(79241)
{'AccountedFlag': 'False','ApplicationNumStatus': '0',
'ApplicationStatus': 'Job Finished Successfully',
'CPUTime': '0.0','DIRACSetup': 'LHCb-Production'}
:param jobID: JobID
:type jobID: int, str or python:list
:param printOutput: Flag to print to stdOut
:type printOutput: Boolean
:returns: S_OK,S_ERROR
"""
ret = self._checkJobArgument(jobID, multiple=False)
if not ret['OK']:
return ret
jobID = ret['Value']
monitoring = RPCClient('WorkloadManagement/JobMonitoring')
result = monitoring.getJobAttributes(jobID)
if not result['OK']:
return result
if printOutput:
print '=================\n', jobID
print self.pPrint.pformat(result['Value'])
return result
#############################################################################
def parameters(self, jobID, printOutput=False):
return self.getJobParameters(jobID, printOutput=printOutput)
def getJobParameters(self, jobID, printOutput=False):
"""Return DIRAC parameters associated with the given job.
DIRAC keeps track of several job parameters which are kept in the job monitoring
service, see example below. Selected parameters also printed to screen.
Example Usage:
>>> print dirac.parameters(79241)
{'OK': True, 'Value': {'JobPath': 'JobPath,JobSanity,JobPolicy,InputData,JobScheduling,TaskQueue',
'JobSanityCheck': 'Job: 768 JDL: OK, InputData: 2 LFNs OK, ','LocalBatchID': 'dc768'}
:param jobID: JobID
:type jobID: int or string
:param printOutput: Flag to print to stdOut
:type printOutput: Boolean
:returns: S_OK,S_ERROR
"""
ret = self._checkJobArgument(jobID, multiple=False)
if not ret['OK']:
return ret
jobID = ret['Value']
monitoring = RPCClient('WorkloadManagement/JobMonitoring')
result = monitoring.getJobParameters(jobID)
if not result['OK']:
return result
result['Value'].pop('StandardOutput', None)
if printOutput:
print self.pPrint.pformat(result['Value'])
return result
#############################################################################
def loggingInfo(self, jobID, printOutput=False):
return self.getJobLoggingInfo(jobID, printOutput=printOutput)
def getJobLoggingInfo(self, jobID, printOutput=False):
"""DIRAC keeps track of job transitions which are kept in the job monitoring
service, see example below. Logging summary also printed to screen at the
INFO level.
Example Usage:
>>> print dirac.loggingInfo(79241)
{'OK': True, 'Value': [('Received', 'JobPath', 'Unknown', '2008-01-29 15:37:09', 'JobPathAgent'),
('Checking', 'JobSanity', 'Unknown', '2008-01-29 15:37:14', 'JobSanityAgent')]}
:param jobID: JobID
:type jobID: int or string
:param printOutput: Flag to print to stdOut
:type printOutput: Boolean
:returns: S_OK,S_ERROR
"""
ret = self._checkJobArgument(jobID, multiple=False)
if not ret['OK']:
return ret
jobID = ret['Value']
monitoring = RPCClient('WorkloadManagement/JobMonitoring')
result = monitoring.getJobLoggingInfo(jobID)
if not result['OK']:
self.log.warn('Could not retrieve logging information for job %s' % jobID)
self.log.warn(result)
return result
if printOutput:
loggingTupleList = result['Value']
fields = ['Source', 'Status', 'MinorStatus', 'ApplicationStatus', 'DateTime']
records = []
for l in loggingTupleList:
records.append([l[i] for i in (4, 0, 1, 2, 3)])
printTable(fields, records, numbering=False, columnSeparator=' ')
return result
#############################################################################
def peek(self, jobID, printout=False, printOutput=False):
return self.peekJob(jobID, printOutput=printout or printOutput)
def peekJob(self, jobID, printOutput=False):
"""The peek function will attempt to return standard output from the WMS for
a given job if this is available. The standard output is periodically
updated from the compute resource via the application Watchdog. Available
standard output is printed to screen at the INFO level.
Example Usage:
>>> print dirac.peek(1484)
{'OK': True, 'Value': 'Job peek result'}
:param jobID: JobID
:type jobID: int or string
:returns: S_OK,S_ERROR
"""
ret = self._checkJobArgument(jobID, multiple=False)
if not ret['OK']:
return ret
jobID = ret['Value']
monitoring = RPCClient('WorkloadManagement/JobMonitoring')
result = monitoring.getJobParameter(jobID, 'StandardOutput')
if not result['OK']:
return self._errorReport(result, 'Could not retrieve job attributes')
stdout = result['Value'].get('StandardOutput')
if stdout:
if printOutput:
self.log.notice(stdout)
else:
self.log.verbose(stdout)
else:
stdout = 'Not available yet.'
self.log.info('No standard output available to print.')
return S_OK(stdout)
#############################################################################
def ping(self, system, service, printOutput=False, url=None):
return self.pingService(system, service, printOutput=printOutput, url=url)
def pingService(self, system, service, printOutput=False, url=None):
"""The ping function will attempt to return standard information from a system
service if this is available. If the ping() command is unsuccessful it could
indicate a period of service unavailability.
Example Usage:
>>> print dirac.ping('WorkloadManagement','JobManager')
{'OK': True, 'Value': 'Job ping result'}
:param system: system
:type system: string
:param service: service name
:type service: string
:param printOutput: Flag to print to stdOut
:type printOutput: Boolean
    :param url: url to ping (instead of system & service)
:type url: string
:returns: S_OK,S_ERROR
"""
    if not (isinstance(system, basestring) and isinstance(service, basestring)) and not isinstance(url, basestring):
return self._errorReport('Expected string for system and service or a url to ping()')
result = S_ERROR()
try:
if not url:
systemSection = getSystemSection(system + '/')
self.log.verbose('System section is: %s' % (systemSection))
section = '%s/%s' % (systemSection, service)
self.log.verbose('Requested service should have CS path: %s' % (section))
serviceURL = getServiceURL('%s/%s' % (system, service))
self.log.verbose('Service URL is: %s' % (serviceURL))
client = RPCClient('%s/%s' % (system, service))
else:
serviceURL = url
client = RPCClient(url)
result = client.ping()
if result['OK']:
result['Value']['service url'] = serviceURL
except Exception as x:
self.log.warn('ping for %s/%s failed with exception:\n%s' % (system, service, str(x)))
result['Message'] = str(x)
if printOutput:
print self.pPrint.pformat(result)
return result
#############################################################################
def getJobJDL(self, jobID, original=False, printOutput=False):
"""Simple function to retrieve the current JDL of an existing job in the
workload management system. The job JDL is converted to a dictionary
and returned in the result structure.
Example Usage:
>>> print dirac.getJobJDL(12345)
{'Arguments': 'jobDescription.xml',...}
:param jobID: JobID
:type jobID: int or string
:returns: S_OK,S_ERROR
"""
ret = self._checkJobArgument(jobID, multiple=False)
if not ret['OK']:
return ret
jobID = ret['Value']
monitoring = RPCClient('WorkloadManagement/JobMonitoring')
result = monitoring.getJobJDL(jobID, original)
if not result['OK']:
return result
result = self.__getJDLParameters(result['Value'])
if printOutput:
print self.pPrint.pformat(result['Value'])
return result
#############################################################################
def __getJDLParameters(self, jdl):
""" Internal function. Returns a dictionary of JDL parameters.
:param jdl: a JDL
:type jdl: ~DIRAC.Interfaces.API.Job.Job or str or file
"""
if hasattr(jdl, '_toJDL'):
jdl = jdl._toJDL()
elif os.path.exists(jdl):
with open(jdl, 'r') as jdlFile:
jdl = jdlFile.read()
if not isinstance(jdl, basestring):
return S_ERROR("Can't read JDL")
try:
parameters = {}
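      # the ClassAd parser expects the JDL body to be enclosed in square brackets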
if '[' not in jdl:
jdl = '[' + jdl + ']'
classAdJob = ClassAd(jdl)
paramsDict = classAdJob.contents
for param, value in paramsDict.iteritems():
if re.search('{', value):
self.log.debug('Found list type parameter %s' % (param))
rawValues = value.replace('{', '').replace('}', '').replace('"', '').replace('LFN:', '').split()
valueList = []
for val in rawValues:
if re.search(',$', val):
valueList.append(val[:-1])
else:
valueList.append(val)
parameters[param] = valueList
else:
self.log.debug('Found standard parameter %s' % (param))
parameters[param] = value.replace('"', '')
return S_OK(parameters)
except Exception as x:
self.log.exception(lException=x)
return S_ERROR('Exception while extracting JDL parameters for job')
#############################################################################
def __printInfo(self):
"""Internal function to print the DIRAC API version and related information.
"""
self.log.info('<=====%s=====>' % (self.diracInfo))
self.log.verbose('DIRAC is running at %s in setup %s' % (DIRAC.siteName(), self.setup))
def getConfigurationValue(self, option, default):
""" Export the configuration client getValue() function
"""
return gConfig.getValue(option, default)
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| gpl-3.0 | -2,685,843,893,400,082,400 | 36.646723 | 165 | 0.619861 | false |
hugobranquinho/ines | ines/config.py | 1 | 23535 | # -*- coding: utf-8 -*-
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict
from collections import defaultdict
from inspect import getargspec
from colander import Invalid
from pkg_resources import get_distribution
from pkg_resources import resource_filename
from pyramid.config import Configurator as PyramidConfigurator
from pyramid.compat import is_nonstr_iter
from pyramid.decorator import reify
from pyramid.exceptions import Forbidden
from pyramid.exceptions import NotFound
from pyramid.httpexceptions import HTTPClientError
from pyramid.i18n import get_localizer
from pyramid.interfaces import IExceptionResponse
from pyramid.path import caller_package
from pyramid.security import NO_PERMISSION_REQUIRED
from pyramid.settings import asbool
from pyramid.static import static_view
from pyramid.threadlocal import get_current_request
from ines import (
API_CONFIGURATION_EXTENSIONS, APPLICATIONS, DEFAULT_METHODS, DEFAULT_RENDERERS, DEFAULT_CACHE_DIRPATH,
lazy_import_module)
from ines.api import BaseSession
from ines.api import BaseSessionManager
from ines.api.jobs import BaseJobsManager
from ines.api.jobs import BaseJobsSession
from ines.api.mailer import BaseMailerSession
from ines.authentication import ApplicationHeaderAuthenticationPolicy
from ines.authorization import Everyone
from ines.authorization import INES_POLICY
from ines.authorization import TokenAuthorizationPolicy
from ines.cache import SaveMe, SaveMeMemcached
from ines.convert import maybe_list
from ines.exceptions import Error
from ines.exceptions import HTTPBrowserUpgrade
from ines.interfaces import IBaseSessionManager
from ines.interfaces import IInputSchemaView
from ines.interfaces import IOutputSchemaView
from ines.interfaces import ISchemaView
from ines.middlewares import DEFAULT_MIDDLEWARE_POSITION
from ines.path import find_class_on_module
from ines.path import get_object_on_path
from ines.view import gzip_static_view
from ines.views.postman import PostmanCollection
from ines.views.schema import SchemaView
from ines.request import InesRequest
from ines.route import RootFactory
from ines.utils import WarningDict
def configuration_extensions(setting_key):
def decorator(wrapped):
API_CONFIGURATION_EXTENSIONS[setting_key] = wrapped.__name__
return wrapped
return decorator
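# Illustrative example: a Configurator method decorated with
# @configuration_extensions('my_extension.') (hypothetical prefix) is expected to be
# picked up by lookup_extensions() and called with the settings found under that prefix.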
class APIWarningDict(WarningDict):
def __setitem__(self, key, value):
if key in self:
existing_value = self[key]
existing_path = '%s:%s' % (existing_value.__module__, existing_value.__name__)
path = '%s:%s' % (value.__module__, value.__name__)
if existing_path == path:
# Do nothing!
return
super(APIWarningDict, self).__setitem__(key, value)
class Configurator(PyramidConfigurator):
def __init__(
self,
application_name=None,
global_settings=None,
**kwargs):
if 'registry' in kwargs:
for application_config in APPLICATIONS.values():
if application_config.registry is kwargs['registry']:
                    # Nothing to do here: this is a .scan() configuration
super(Configurator, self).__init__(**kwargs)
                    return # Nothing else to do here
if 'package' in kwargs:
            # Nothing to do here.
super(Configurator, self).__init__(**kwargs)
return
kwargs['package'] = caller_package()
settings = kwargs['settings'] = dict(kwargs.get('settings') or {})
kwargs['settings'].update(global_settings or {})
# Define pyramid debugs
settings['debug'] = asbool(settings.get('debug', False))
if 'reload_all' not in settings:
settings['reload_all'] = settings['debug']
if 'debug_all' not in settings:
settings['debug_all'] = settings['debug']
if 'reload_templates' not in settings:
settings['reload_templates'] = settings['debug']
if 'root_factory' not in kwargs:
kwargs['root_factory'] = RootFactory
if 'request_factory' not in kwargs:
kwargs['request_factory'] = InesRequest
super(Configurator, self).__init__(**kwargs)
self.registry.config = self
self.registry.package_name = self.registry.__name__
# Define application_name
self.application_name = application_name or self.package_name
self.registry.application_name = self.application_name
# Define global cache
cache_settings = {
key[6:]: value
for key, value in self.settings.items()
if key.startswith('cache.')}
cache_type = cache_settings.pop('type', None)
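        # 'cache.type = memcached' selects the memcached backend; any other value falls back to the file-based cache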
if cache_type == 'memcached':
self.cache = SaveMeMemcached(**cache_settings)
else:
if 'path' not in cache_settings:
cache_settings['path'] = DEFAULT_CACHE_DIRPATH
self.cache = SaveMe(**cache_settings)
# Find extensions on settings
bases = APIWarningDict('Duplicated name "{key}" for API Class')
sessions = APIWarningDict('Duplicated name "{key}" for API Session')
for key, value in self.settings.items():
if key.startswith('api.'):
options = key.split('.', 2)[1:]
if len(options) == 1:
name, option = options[0], 'session_path'
else:
name, option = options
if option == 'session_path':
if isinstance(value, str):
sessions[name] = get_object_on_path(value)
else:
sessions[name] = value
elif option == 'class_path':
if isinstance(value, str):
bases[name] = get_object_on_path(value)
else:
bases[name] = value
# Find sessions on module
for session in find_class_on_module(self.package, BaseSession):
app_name = getattr(session, '__app_name__', None)
if not app_name or app_name == application_name:
sessions[session.__api_name__] = session
# Find session manager on module
for session_manager in find_class_on_module(
self.package,
BaseSessionManager):
app_name = getattr(session_manager, '__app_name__', None)
if not app_name or app_name == application_name:
bases[session_manager.__api_name__] = session_manager
# Find default session manager
default_bases = defaultdict(list)
for session_manager in find_class_on_module('ines.api', BaseSessionManager):
api_name = getattr(session_manager, '__api_name__', None)
default_bases[api_name].append(session_manager)
# Define extensions
for api_name, session in sessions.items():
session_manager = bases.get(api_name)
if session_manager is None:
session_manager = getattr(session, '__default_session_manager__', None)
if session_manager is None:
default_session_managers = default_bases.get(api_name)
if not default_session_managers:
session_manager = BaseSessionManager
else:
session_manager = default_session_managers[0]
self.registry.registerUtility(
session_manager(self, session, api_name),
provided=IBaseSessionManager,
name=api_name)
# Middlewares
self.middlewares = []
# Register package
APPLICATIONS[self.application_name] = self
# Default translations dirs
self.add_translation_dirs('colander:locale')
self.add_translation_dirs('ines:locale')
@reify
def settings(self):
return self.registry.settings
@reify
def debug(self):
return self.settings.get('debug')
@reify
def version(self):
return get_distribution(self.package_name).version
@property
def is_production_environ(self):
return asbool(self.settings['is_production_environ'])
def add_routes(self, *routes, **kwargs):
for arguments in routes:
if not arguments:
raise ValueError('Define some arguments')
elif not isinstance(arguments, dict):
list_arguments = maybe_list(arguments)
arguments = {'name': list_arguments[0]}
if len(list_arguments) > 1:
arguments['pattern'] = list_arguments[1]
self.add_route(**arguments)
def add_default_renderers(self):
import ines.renderers
super(Configurator, self).add_default_renderers()
for key, renderer in DEFAULT_RENDERERS.items():
self.add_renderer(key, renderer)
def add_view(self, *args, **kwargs):
if 'permission' not in kwargs:
# Force permission validation
kwargs['permission'] = INES_POLICY
return super(Configurator, self).add_view(*args, **kwargs)
def lookup_extensions(self):
found_settings = defaultdict(dict)
for find_setting_key, method_name in API_CONFIGURATION_EXTENSIONS.items():
if not find_setting_key.endswith('.'):
find_setting_key += '.'
for key, value in self.settings.items():
if key.startswith(find_setting_key):
setting_key = key.split(find_setting_key, 1)[1]
found_settings[method_name][setting_key] = value
for method_name, settings in found_settings.items():
method = getattr(self, method_name, None)
if method is not None:
method_settings = {
argument: settings[argument]
for argument in getargspec(method).args
if argument in settings}
method(**method_settings)
def install_middleware(self, name, middleware, settings=None):
self.middlewares.append((name, middleware, settings or {}))
def make_wsgi_app(self, install_middlewares=True):
# Find for possible configuration extensions
self.lookup_extensions()
# Scan all package routes
self.scan(self.package_name, categories=['pyramid'])
# Scan package jobs
scan_jobs = False
jobs_manager = None
for name, extension in self.registry.getUtilitiesFor(IBaseSessionManager):
if issubclass(extension.session, BaseMailerSession) and 'queue_path' in extension.settings:
scan_jobs = True
elif issubclass(extension.session, BaseJobsSession):
scan_jobs = True
jobs_manager = extension
elif isinstance(extension, BaseJobsManager):
jobs_manager = extension
if scan_jobs:
if jobs_manager is None:
raise ValueError('Please define module for jobs.')
self.scan(self.package_name, categories=['ines.jobs'], jobs_manager=jobs_manager)
self.scan('ines', categories=['ines.jobs'], jobs_manager=jobs_manager)
app = super(Configurator, self).make_wsgi_app()
if install_middlewares:
# Look for middlewares in API Sessions
for name, extension in self.registry.getUtilitiesFor(IBaseSessionManager):
if hasattr(extension, '__middlewares__'):
for extension_middleware in extension.__middlewares__:
self.install_middleware(
extension_middleware.name,
extension_middleware,
settings={'api_manager': extension})
# Define middleware settings
middlewares_settings = defaultdict(dict)
for key, value in self.settings.items():
if key.startswith('middleware.'):
maybe_name = key.split('middleware.', 1)[1]
if '.' in maybe_name:
parts = maybe_name.split('.')
setting_key = parts[-1]
name = '.'.join(parts[:-1])
middlewares_settings[name][setting_key] = value
else:
# Install settings middlewares
middleware_class = get_object_on_path(value)
self.install_middleware(maybe_name, middleware_class)
# Install middlewares with reversed order. Lower position first
if self.middlewares:
middlewares = []
for name, middleware, settings in self.middlewares:
middlewares_settings[name].update(settings)
default_position = getattr(middleware, 'position', DEFAULT_MIDDLEWARE_POSITION.get(name))
position = settings.get('position', default_position) or 0
middlewares.append((position, name, middleware))
middlewares.sort(reverse=True)
for position, name, middleware in middlewares:
app = middleware(self, app, **middlewares_settings[name])
app.name = name
return app
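    # Settings convention handled above (values are illustrative, not taken
    # from a real deployment):
    #   middleware.<name> = <dotted.path.to.Middleware>   installs a middleware
    #   middleware.<name>.<option> = <value>              per-middleware option
    # The optional 'position' option (or the middleware's own default) decides
    # the wrapping order when several middlewares are installed.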
@configuration_extensions('api.policy.token')
def set_token_policy(
self,
application_name,
header_key=None,
cookie_key=None):
# Authentication Policy
authentication_policy = ApplicationHeaderAuthenticationPolicy(
application_name,
header_key=header_key,
cookie_key=cookie_key)
self.set_authentication_policy(authentication_policy)
authorization_policy = TokenAuthorizationPolicy(application_name)
self.set_authorization_policy(authorization_policy)
@configuration_extensions('errors.interface')
def add_errors_interface(
self,
not_found=None,
forbidden=None,
global_error=None,
error=None,
browser_error=None,
):
if browser_error:
self.add_view(
view=browser_error,
context=HTTPBrowserUpgrade,
permission=NO_PERMISSION_REQUIRED,
exception_only=True)
if not_found:
self.add_view(
view=not_found,
context=NotFound,
permission=NO_PERMISSION_REQUIRED,
exception_only=True)
if forbidden:
self.add_view(
view=forbidden,
context=Forbidden,
permission=NO_PERMISSION_REQUIRED,
exception_only=True)
if global_error:
self.settings['errors.interface.global_error_view'] = global_error_view = self.maybe_dotted(global_error)
self.add_view(
view=global_error_view,
context=IExceptionResponse,
permission=NO_PERMISSION_REQUIRED,
exception_only=True)
if error:
self.settings['errors.interface.error_view'] = error_view = self.maybe_dotted(error)
self.add_view(
view=error_view,
context=Error,
permission=NO_PERMISSION_REQUIRED,
exception_only=True)
self.add_view(
view=error_view,
context=Invalid,
permission=NO_PERMISSION_REQUIRED,
exception_only=True)
@configuration_extensions('deform')
def set_deform_translation(self, path=None, production_path=None, base_static_path=None):
def translator(term):
return get_localizer(get_current_request()).translate(term)
deform = lazy_import_module('deform')
path = self.is_production_environ and production_path or path
if path:
deform_template_dir = resource_filename(*path.split(':', 1))
zpt_renderer = deform.ZPTRendererFactory(
[deform_template_dir],
translator=translator)
deform.Form.set_default_renderer(zpt_renderer)
if base_static_path:
if not base_static_path.endswith('/'):
base_static_path += '/'
for versions in deform.widget.default_resources.values():
for resources in versions.values():
for resource_type, resource in resources.items():
new_resources = [
r.replace('deform:static/', base_static_path, 1)
for r in maybe_list(resource)]
if not is_nonstr_iter(resource):
resources[resource_type] = new_resources[0]
else:
resources[resource_type] = tuple(new_resources)
self.add_translation_dirs('deform:locale')
if not base_static_path:
self.add_static_view('deform', 'deform:static')
def add_static_views(self, *routes, **kwargs):
permission = kwargs.get('permission', Everyone)
        cache_max_age = kwargs.get('cache_max_age', None)
for route_name, path, pattern in routes:
self.add_view(
route_name=route_name,
view=static_view(path, cache_max_age=cache_max_age, use_subpath=True),
permission=permission)
self.add_routes((route_name, pattern))
def add_gzip_static_view(self, path, gzip_path, route_name='static', cache_max_age=None, permission=Everyone):
self.add_view(
route_name=route_name,
view=gzip_static_view(path, gzip_path=gzip_path, cache_max_age=cache_max_age, use_subpath=True),
permission=permission)
def register_input_schema(self, view, route_name, request_method):
for req_method in maybe_list(request_method) or ['']:
utility_name = '%s %s' % (route_name or '', req_method or '')
self.registry.registerUtility(
view,
provided=IInputSchemaView,
name=utility_name)
def lookup_input_schema(self, route_name, request_method=None):
request_method = maybe_list(request_method or DEFAULT_METHODS)
request_method.append('')
schemas = []
for req_method in request_method:
utility_name = '%s %s' % (route_name or '', req_method or '')
view = self.registry.queryUtility(IInputSchemaView, name=utility_name)
if view is not None:
schemas.append(view)
return schemas
def register_output_schema(self, view, route_name, request_method):
for req_method in maybe_list(request_method) or ['']:
utility_name = '%s %s' % (route_name or '', req_method or '')
self.registry.registerUtility(
view,
provided=IOutputSchemaView,
name=utility_name)
def lookup_output_schema(self, route_name, request_method=None):
request_method = maybe_list(request_method or DEFAULT_METHODS)
request_method.append('')
schemas = []
for req_method in request_method:
            utility_name = '%s %s' % (route_name or '', req_method or '')
view = self.registry.queryUtility(IOutputSchemaView, name=utility_name)
if view is not None:
schemas.append(view)
return schemas
class APIConfigurator(Configurator):
@configuration_extensions('apidocjs')
def add_apidocjs_view(
self, pattern='documentation', cache_max_age=86400,
resource_name='apidocjs'):
static_func = static_view(
'%s:%s/' % (self.package_name, resource_name),
package_name=self.package_name,
use_subpath=True,
cache_max_age=int(cache_max_age))
self.add_route(resource_name, pattern='%s*subpath' % pattern)
self.add_view(
route_name=resource_name,
view=static_func,
permission=INES_POLICY)
def add_schema_manager(self, view, route_name, pattern, **view_kwargs):
self.registry.registerUtility(
view,
provided=ISchemaView,
name=route_name)
self.add_route(name=route_name, pattern=pattern)
self.add_view(
view,
route_name=route_name,
renderer='json',
request_method='GET',
**view_kwargs)
def add_schema(
self,
pattern,
route_name=None,
list_route_name=None,
schema_route_name=None,
csv_route_name=None,
title=None,
description=None,
request_methods=None,
route_pattern=None,
list_route_pattern=None,
csv_route_pattern=None,
postman_folder_name=None,
**view_kwargs):
schema_route_name = schema_route_name or '%s_schema' % (route_name or list_route_name or csv_route_name)
view = SchemaView(
schema_route_name=schema_route_name,
route_name=route_name,
list_route_name=list_route_name,
csv_route_name=csv_route_name,
title=title,
description=description,
request_methods=request_methods,
postman_folder_name=postman_folder_name)
self.add_schema_manager(view, schema_route_name, pattern, **view_kwargs)
if route_pattern:
self.add_routes((route_name, route_pattern))
if list_route_name and list_route_pattern:
self.add_routes((list_route_name, list_route_pattern))
if csv_route_name and csv_route_pattern:
self.add_routes((csv_route_name, csv_route_pattern))
@configuration_extensions('postman')
def add_postman_route(
self, pattern, name='postman', permission=None,
title=None, description=None):
kwargs = {}
if permission:
kwargs['permission'] = permission
self.add_route(name=name, pattern=pattern)
self.add_view(
PostmanCollection(
title=title or self.application_name,
description=description),
route_name=name,
renderer='json',
**kwargs)
def add_view(self, *args, **kwargs):
if 'renderer' not in kwargs:
kwargs['renderer'] = 'json'
return super(APIConfigurator, self).add_view(*args, **kwargs)
@configuration_extensions('apierrors.interface')
def add_api_errors_interface(self, only_http_errors=False):
# Set JSON handler
self.add_view(
view='ines.views.errors_json_view',
context=HTTPClientError,
permission=NO_PERMISSION_REQUIRED)
if not asbool(only_http_errors):
self.add_view(
view='ines.views.errors_json_view',
context=Error,
permission=NO_PERMISSION_REQUIRED)
if not asbool(only_http_errors):
self.add_view(
view='ines.views.errors_json_view',
context=Invalid,
permission=NO_PERMISSION_REQUIRED)
| mit | -5,630,998,942,843,912,000 | 38.029851 | 117 | 0.589845 | false |
dbentley/pants | src/python/pants/help/help_printer.py | 1 | 4035 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import sys
from pants.base.build_environment import pants_release, pants_version
from pants.help.help_formatter import HelpFormatter
from pants.help.scope_info_iterator import ScopeInfoIterator
from pants.option.arg_splitter import (GLOBAL_SCOPE, NoGoalHelp, OptionsHelp, UnknownGoalHelp,
VersionHelp)
from pants.option.scope import ScopeInfo
class HelpPrinter(object):
"""Prints help to the console.
:API: public
"""
def __init__(self, options):
"""
:API: public
"""
self._options = options
@property
def _help_request(self):
return self._options.help_request
def print_help(self):
"""Print help to the console.
:API: public
:return: 0 on success, 1 on failure
"""
def print_hint():
print('Use `pants goals` to list goals.')
print('Use `pants help` to get help.')
if isinstance(self._help_request, VersionHelp):
print(pants_version())
elif isinstance(self._help_request, OptionsHelp):
self._print_options_help()
elif isinstance(self._help_request, UnknownGoalHelp):
print('Unknown goals: {}'.format(', '.join(self._help_request.unknown_goals)))
print_hint()
return 1
elif isinstance(self._help_request, NoGoalHelp):
print('No goals specified.')
print_hint()
return 1
return 0
def _print_options_help(self):
"""Print a help screen.
Assumes that self._help_request is an instance of OptionsHelp.
    Note: Only useful if called after options have been registered.
"""
show_all_help = self._help_request.all_scopes
if show_all_help:
help_scopes = self._options.known_scope_to_info.keys()
else:
# The scopes explicitly mentioned by the user on the cmd line.
help_scopes = set(self._options.scope_to_flags.keys()) - set([GLOBAL_SCOPE])
scope_infos = list(ScopeInfoIterator(self._options.known_scope_to_info).iterate(help_scopes))
if scope_infos:
for scope_info in scope_infos:
help_str = self._format_help(scope_info)
if help_str:
print(help_str)
return
else:
print(pants_release())
print('\nUsage:')
print(' ./pants [option ...] [goal ...] [target...] Attempt the specified goals.')
print(' ./pants help Get help.')
print(' ./pants help [goal] Get help for a goal.')
print(' ./pants help-advanced [goal] Get help for a goal\'s advanced options.')
print(' ./pants help-all Get help for all goals.')
print(' ./pants goals List all installed goals.')
print('')
print(' [target] accepts two special forms:')
print(' dir: to include all targets in the specified directory.')
print(' dir:: to include all targets found recursively under the directory.')
print('\nFriendly docs:\n http://pantsbuild.github.io/')
print(self._format_help(ScopeInfo(GLOBAL_SCOPE, ScopeInfo.GLOBAL)))
def _format_help(self, scope_info):
"""Return a help message for the options registered on this object.
Assumes that self._help_request is an instance of OptionsHelp.
:param scope_info: Scope of the options.
"""
scope = scope_info.scope
description = scope_info.description
show_recursive = self._help_request.advanced
show_advanced = self._help_request.advanced
color = sys.stdout.isatty()
help_formatter = HelpFormatter(scope, show_recursive, show_advanced, color)
return '\n'.join(help_formatter.format_options(scope, description,
self._options.get_parser(scope).option_registrations_iter()))
| apache-2.0 | 6,728,683,942,428,738,000 | 35.681818 | 102 | 0.640149 | false |
etherealpost/etherealpost.com | etherealpost/db/db.py | 1 | 17493 | import json
import math
from etherealpost.db import helpers
from etherealpost.db.cache import Cache
from etherealpost.db.helpers import gold_string, time_left_string
from etherealpost.db.item import Item
import time
from etherealpost.db.realm import Realm
from etherealpost.db.realm_name import RealmName
class Db(object):
def __init__(self, db):
self.db = db
self.cache = Cache()
def total_auctions(self):
return self.db.Auction.count()
def db_size(self, cache=True):
if cache:
c = self.cache.get('mdbsize')
if c is not None:
return float(c)
db_size = self.db.command({'dbStats': 1})['dataSize']
self.cache.set('mdbsize', db_size, ex=(5*60)) # Cache for 5 mins
return db_size
def total_documents(self, cache=True):
if cache:
c = self.cache.get('mdbobjs')
if c is not None:
return int(c)
total_objs = self.db.command({'dbStats': 1})['objects']
self.cache.set('mdbobjs', total_objs,
ex=(5*60)) # Cache for 5 mins
return total_objs
def data_last_updated(self, cache=True):
if cache:
c = self.cache.get('lup')
if c is not None:
return int(c)
data = self.db.Realm.find({}).sort([('lastUpdated', -1)]).limit(1)[0]
last_updated = data['lastUpdated']
self.cache.set('lup', last_updated, ex=10) # Cache for 10 seconds
return last_updated
def get_auctions_for_item(self, item, realm, cache=True):
"""
:type item: int
:type realm: Realm
:type cache: bool
:rtype: list[Auction]
"""
key = 'i:{0}:{1}'.format(realm.id, item)
lm = realm.lastModified
if cache is True:
c = self.cache.get_json(key=key, last_modified=lm)
if c is not None:
return c
cursor = self.db.Auction.find({
'realm': realm.id,
'item': int(item)
})
auctions = []
for auc in cursor:
del(auc['_id'])
auctions.append(auc)
self.cache.set_json(key=key, data=auctions, last_modified=lm)
return auctions
def get_item(self, item_id):
item = self.db.Item.find_one({'_id': int(item_id)})
return Item(**item) if item is not None else None
@staticmethod
def get_auction_mp(auctions):
if len(auctions):
bp = [math.ceil(a['buyout']/a['quantity']) for a in auctions]
return helpers.get_market_price(bp)
else:
return 0
def get_slugs_by_owner_realm(self, cache=True):
if cache:
c = self.cache.get('slug:or')
if c is not None:
return json.loads(c.decode('utf-8'))
cursor = self.db.RealmName.find({})
slugs = {}
for c in cursor:
if 'ownerRealm' in c:
ownr = c['ownerRealm']
if isinstance(ownr, list):
for i in ownr:
slugs[i] = c['slug']
else:
slugs[ownr] = c['slug']
self.cache.set('slug:or', json.dumps(slugs), ex=(12*60*60)) # 12 hours
return slugs
def names_by_slug(self, cache=True):
if cache:
c = self.cache.get('names:slug')
if c is not None:
return json.loads(c.decode('utf-8'))
cursor = self.db.RealmName.find()
slugs = {}
for c in cursor:
if 'name' in c and 'slug' in c:
slugs[c['slug']] = c['name']
self.cache.set('names:slug', json.dumps(slugs),
ex=(12 * 60 * 60)) # 12 hours
return slugs
def get_unique_sellers(self, realm, limit=None):
query = [
{'$match': {'realm': int(realm.id)}},
{'$group': {
'_id': {'owner': '$owner', 'ownerRealm': '$ownerRealm'},
'count': {'$sum': 1},
'buyout_total': {'$sum': '$buyout'}
}},
{'$sort': {
'count': -1
}}
]
if limit is not None:
query.append({'$limit': limit})
return list(self.db.Auction.aggregate(query))
def get_market_cap(self, realm):
query = [
{'$match': {'realm': int(realm.id)}},
{'$group': {
'_id': None,
'market_cap': {'$sum': '$buyout'},
}}
]
return list(self.db.Auction.aggregate(query))[0]['market_cap']
def get_unique_items(self, realm, limit=None):
query = [
{'$match': {'realm': int(realm.id)}},
{'$group': {
'_id': {'item': '$item'},
'count': {'$sum': 1},
'buyout_total': {'$sum': '$buyout'}
}},
{'$lookup': {
'from': 'Item',
'localField': '_id.item',
'foreignField': '_id',
'as': 'itemDetails'
}},
{'$sort': {
'count': -1
}}
]
if limit is not None:
query.append({'$limit': limit})
return list(self.db.Auction.aggregate(query))
def get_total_auctions(self, realm=None):
if realm is None:
            return self.db.Auction.count()
else:
return self.db.Auction.count({'realm': int(realm.id)})
def get_realm_statistics(self, realm, cache=True):
key = 'rstats:{0}'.format(realm.id)
lm = realm.lastModified
if cache is True:
c = self.cache.get_json(key=key, last_modified=lm)
if c is not None:
print('Got from cache! Key: {0}'.format(lm))
return c
unique_sellers = self.get_unique_sellers(realm=realm)
market_cap = self.get_market_cap(realm=realm)
unique_items = self.get_unique_items(realm=realm)
total_auctions = self.get_total_auctions(realm=realm)
return_data = {
'total_auctions': total_auctions,
'unique_sellers': unique_sellers,
'top_sellers': unique_sellers[0:10],
'market_cap': market_cap,
'unique_items': len(unique_items),
'popular_items': unique_items[0:10]
}
# Don't cache if the realm is currently updating
if self.is_realm_running(realm=realm) is False:
self.cache.set_json(key=key, data=return_data, last_modified=lm)
return return_data
def is_realm_running(self, realm):
# Get the current state of the realm
realm = self.get_realm_from_url(region=realm.region,
slug=realm.realms[0])
# Don't cache if the realm is currently updating
return realm.runningNow
def get_realm_stats_for_item(self, realm, item_id, cache=True):
key = 'ph:i:{0}:{1}'.format(realm.id, item_id)
lm = realm.lastModified
if cache is True:
c = self.cache.get_json(key=key, last_modified=lm)
if c is not None:
return c
price_history = self.price_history(realm=realm, item_id=item_id,
region=False)
return_data = {
'median_market': price_history['median_market'],
'avg_market': price_history['avg_market'],
'std_dev': price_history['std_dev'],
'ph_mp_chart': price_history['ph_mp_chart'],
'ph_qty_chart': price_history['ph_qty_chart']
}
# Don't cache if the realm is currently updating
if self.is_realm_running(realm=realm) is False:
self.cache.set_json(key=key, data=return_data, last_modified=lm)
return return_data
def get_owner_realm_by_slug(self, slug, cache=True):
key = 'or:slug:{0}'.format(slug)
if cache:
c = self.cache.get(key)
if c is not None:
return c.decode('utf-8')
realm_name = self.db.RealmName.find_one({'slug': slug})
if realm_name is None:
return None
ownr = RealmName(**realm_name).ownerRealm
self.cache.set(key, ownr, ex=(12*60*60)) # Cache for 12 hours
return ownr
def get_seller_statistics(self, realm, owner, owner_realm, cache=True):
key = 'sh:{0}:{1}{2}'.format(realm.id, owner, owner_realm)
lm = realm.lastModified
if cache is True:
c = self.cache.get_json(key=key, last_modified=lm)
if c is not None:
return c
if isinstance(owner_realm, list):
or_query = {
'$in': owner_realm
}
else:
or_query = owner_realm
query = [
{'$match': {
'realm': int(realm.id),
'owner': owner.title(),
'ownerRealm': or_query
}},
{'$lookup': {
'from': 'Item',
'localField': 'item',
'foreignField': '_id',
'as': 'itemDetails'
}}
]
cursor = self.db.Auction.aggregate(query)
history_query = {'time': {
'$gte': int(time.time()) - (60 * 60 * 24 * 14)
},
'realm': int(realm.id),
'owner': owner.title(),
'ownerRealm': or_query
}
history_cursor = self.db.SellerHistory.find(history_query)
seller_history = [
[h['time'], h['auctionCount']] for h in history_cursor]
auctions = []
item_count = {}
max_buyout = 0
max_buyout_id = 0
buyout_value = 0
for auction in cursor:
del(auction['_id'])
auction['buyoutPer'] = math.ceil(auction['buyout'] /
auction['quantity'])
auctions.append(auction)
if auction['item'] != 82800:
buyout_value += auction['buyout']
# Get rid of pet cages
if auction['item'] not in item_count:
item_count[auction['item']] = 0
item_count[auction['item']] += 1
if auction['buyout'] > max_buyout:
max_buyout = auction['buyout']
max_buyout_id = auction['item']
import operator
most_common = max(item_count.items(), key=operator.itemgetter(1))
auctions = sorted(auctions, key=lambda x: (x['timeLeft'],
-x['item'],
x['buyoutPer'],
x['quantity']))
return_dict = {
'auctions': auctions,
'buyout_value': buyout_value,
'most_expensive': {'item': max_buyout_id, 'amount': max_buyout},
'most_common': {'item': most_common[0], 'count': most_common[1]},
'seller_history': seller_history
}
# Don't cache if the realm is currently updating
if self.is_realm_running(realm=realm) is False:
self.cache.set_json(key=key, data=return_dict, last_modified=lm)
return return_dict
def get_region_stats_for_item(self, realm, item_id, cache=True):
key = 'rs:{0}:{1}'.format(realm.region, item_id)
if cache is True:
c = self.cache.get(key=key)
if c is not None:
return json.loads(c.decode('utf-8'))
price_history = self.price_history(realm=realm, item_id=item_id,
region=True)
realm_ids = self.realm_ids_for_region(realm.region)
avg_cursor = self.db.PriceHistory.aggregate([
{'$match': {
'realm': {'$in': realm_ids},
'item': int(item_id)
}},
{'$sort': {
'time': -1
}},
{'$group': {
'_id': {'realm': '$realm'},
'mp': {'$last': '$market_price'},
}}
])
qty_cursor = self.db.Auction.aggregate([
{'$match': {
'realm': {'$in': realm_ids},
'item': int(item_id),
'buyout': {'$gt': 1}
}},
{'$group': {
'_id': None,
'qty': {'$sum': '$quantity'},
'num_auc': {'$sum': 1}
}}
])
mps = []
qty = []
num_auc = []
for c in avg_cursor:
mps.append(c['mp'])
for c in qty_cursor:
num_auc.append(c['num_auc'])
qty.append(c['qty'])
if len(mps):
data_dict = {
'avg_market': price_history['avg_market'],
'median_market': price_history['median_market'],
'std_dev': price_history['std_dev'],
'avg_mp': helpers.average(mps),
'total_qty': sum(qty),
'num_auc': sum(num_auc)
}
else:
data_dict = {
'avg_market': 0,
'median_market': 0,
'std_dev': 0,
'avg_mp': 0,
'total_qty': 0,
'num_auc': 0
}
self.cache.set(key=key, value=json.dumps(data_dict), ex=30)
return data_dict
def price_history(self, realm, item_id, region=False):
ph_date = int(time.time()) - (60*60*24*14)
if region:
realm_search = {
'$in': self.realm_ids_for_region(region=realm.region)
}
else:
realm_search = realm.id
cursor = self.db.PriceHistory.find({
'realm': realm_search,
'item': int(item_id),
'time': {'$gte': ph_date}
})
mps = []
price_history = []
ph_mp_chart = []
ph_qty_chart = []
for c in cursor:
price_history.append(c)
mps.append(c['market_price'])
ph_mp_chart.append([c['time']*1000, c['market_price']])
ph_qty_chart.append([c['time']*1000, c['total_qty']])
if len(mps):
avg_market = helpers.average(mps)
median_market = helpers.median(mps)
st_dev = helpers.std_dev(data=mps, population=True)
return {
'avg_market': avg_market,
'median_market': median_market,
'std_dev': st_dev,
'raw': price_history,
'ph_mp_chart': ph_mp_chart,
'ph_qty_chart': ph_qty_chart
}
else:
return {
'avg_market': 0,
'median_market': 0,
'std_dev': 0,
'raw': [],
'ph_mp_chart': [],
'ph_qty_chart': []
}
def realm_ids_for_region(self, region, cache=True):
key = 'rid:region:{0}'.format(region)
if cache:
c = self.cache.get(key)
if c is not None:
return json.loads(c.decode('utf-8'))
realms = self.db.Realm.find({'region': region})
ids = [r['_id'] for r in realms]
self.cache.set(key, json.dumps(ids), ex=(12*60*60)) # 12 hrs
return ids
def get_realm_from_url(self, region, slug):
result = self.db.Realm.find_one({'region': region, 'realms': slug})
return Realm(**result) if result is not None else None
def generate_auctions_for_display(self, auctions, region, slug):
for i, auction in enumerate(auctions):
            if 'bp' not in auctions[i]:
bp = math.ceil(auction['buyout'] / auction['quantity'])
auctions[i]['bp'] = bp
slugs = self.get_slugs_by_owner_realm()
auctions = sorted(auctions, key=lambda x: (x['bp'], x['owner'],
x['quantity']))
dt = ''
for auction in auctions:
or_slug = slugs[auction['ownerRealm']]
dt += '<tr>'
dt += '<td class="numAuctions col-md-1">1</td>'
dt += '<td>{0}</td>'.format(auction['quantity'])
dt += '<td><a href="/{region}/{realm_slug}/seller/' \
'{owner_l}-{owner_realm}">{owner}</a></td>' \
.format(region=region, realm_slug=slug,
owner=auction['owner'],
owner_l=auction['owner'].lower(),
owner_realm=or_slug)
dt += '<td>{0}</td>'.format(time_left_string(auction['timeLeft']))
dt += '<td>{0}</td>'.format(gold_string(auction['buyout']))
dt += '<td>{0}</td>'.format(gold_string(auction['bp']))
dt += '</tr>'
return dt
@staticmethod
def sortkeypicker(keynames):
negate = set()
for i, k in enumerate(keynames):
if k[:1] == '-':
keynames[i] = k[1:]
negate.add(k[1:])
def getit(adict):
composite = [adict[kn] for kn in keynames]
for l, (m, v) in enumerate(zip(keynames, composite)):
if m in negate:
composite[l] = -v
return composite
return getit
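    # Usage sketch for sortkeypicker (field names are illustrative): sorting
    # auctions by descending buyout and then ascending owner could be written as
    #   sorted(auctions, key=Db.sortkeypicker(['-buyout', 'owner']))
    # The '-' prefix negates the value, so it only works for numeric fields.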
| mit | 3,258,545,019,466,609,700 | 33.502959 | 79 | 0.475219 | false |
Som-Energia/somenergia-generationkwh | som_generationkwh/account_invoice.py | 1 | 6861 | # coding=utf-8
from osv import osv
import generationkwh.investmentmodel as gkwh
class AccountInvoice(osv.osv):
_name = 'account.invoice'
_inherit = 'account.invoice'
def unpay(self, cursor, uid, ids, amount, pay_account_id, period_id,
pay_journal_id, context=None, name=''):
res = super(AccountInvoice, self).unpay(
cursor, uid, ids, amount, pay_account_id, period_id, pay_journal_id,
context, name
)
#TODO: Untested
Investment = self.pool.get('generationkwh.investment')
for invoice_id in ids:
investment_id = self.get_investment(cursor,uid,invoice_id)
if not investment_id: continue
if not self.is_investment_payment(cursor,uid,invoice_id):continue
moveline_id = self.investment_last_moveline(cursor,uid,invoice_id)
Investment.mark_as_unpaid(cursor,uid,[investment_id],moveline_id)
return res
def pay_and_reconcile(self, cursor, uid, ids, pay_amount,
pay_account_id, period_id, pay_journal_id,
writeoff_acc_id, writeoff_period_id,
writeoff_journal_id, context=None, name=''):
res = super(AccountInvoice, self).pay_and_reconcile(
cursor, uid, ids, pay_amount, pay_account_id, period_id,
pay_journal_id, writeoff_acc_id, writeoff_period_id,
writeoff_journal_id, context, name
)
#TODO: Untested
from datetime import date
today = str(date.today()) #TODO date more real?
Investment = self.pool.get('generationkwh.investment')
for invoice_id in ids:
investment_id = self.get_investment(cursor,uid,invoice_id)
if not investment_id: continue
moveline_id = self.investment_last_moveline(cursor,uid,invoice_id)
if not self.is_investment_payment(cursor,uid,invoice_id):
continue
Investment.mark_as_paid(cursor,uid,[investment_id],today,moveline_id)
return res
def is_investment_payment(self, cursor, uid, invoice_id):
invoice = self.read(cursor, uid, invoice_id, ['name'])
return invoice and 'name' in invoice and str(invoice['name']).endswith("-JUST")
def get_investment(self, cursor, uid, inv_id):
invoice = self.browse(cursor, uid, inv_id)
Investment = self.pool.get('generationkwh.investment')
Emission = self.pool.get('generationkwh.emission')
investment_ids = Investment.search(cursor, uid, [
('name','=',invoice.origin),
])
if not investment_ids:
return None
investment_data = Investment.read(cursor, uid, investment_ids[0], ['emission_id'])
emission_data = Emission.read(cursor, uid, investment_data['emission_id'][0], ['journal_id'])
if not invoice.journal_id: return None # TODO: Test missing
Journal = self.pool.get('account.journal')
journal_id_gen = emission_data['journal_id'][0]
if invoice.journal_id.id != journal_id_gen:
return None
return investment_ids[0]
def investment_last_moveline(self, cursor, uid, invoice_id):
"""
For an investment invoice, gets the last moveline against
non bridge account.
Intended use is to recover the invoicing account movement
after a payment or an unpayment. DO NOT USE FOR ANY OTHER
PURPOSE WITHOUT A DOUBLE CHECK.
"""
invoice = self.read(cursor, uid, invoice_id, [
'journal_id',
'name',
'origin'
])
Account = self.pool.get('account.account')
Investment = self.pool.get('generationkwh.investment')
Emission = self.pool.get('generationkwh.emission')
inv_id = Investment.search(cursor, uid, [('name','=',invoice['origin'])])
inv_data = Investment.read(cursor, uid, inv_id[0], ['emission_id'])
em_data = Emission.read(cursor, uid, inv_data['emission_id'][0], ['bridge_account_payments_id'])
account_id = em_data['bridge_account_payments_id'][0]
MoveLine = self.pool.get('account.move.line')
ids = MoveLine.search(cursor, uid, [
('ref', '=', invoice['name']),
('journal_id', '=', invoice['journal_id'][0]),
('account_id', '<>', account_id),
])
moveline_ids = sorted(ids)
if not moveline_ids: return False # TODO: Untested case
return moveline_ids[-1]
AccountInvoice()
class TesthelperPaymentWizard(osv.osv_memory):
_name = 'generationkwh.payment.wizard.testhelper'
_auto = False
def unpay(self, cursor, uid, invoice_id, movelinename):
IrModelData = self.pool.get('ir.model.data')
model, journal_id = IrModelData.get_object_reference(
cursor, uid,
'som_generationkwh', 'genkwh_journal',
)
Invoice = self.pool.get('account.invoice')
invoice = Invoice.read(cursor, uid, invoice_id, [
'amount_total',
'account_id',
])
Wizard = self.pool.get('wizard.unpay')
from datetime import date
wizard_id = Wizard.create(cursor, uid, dict(
name = movelinename,
date = date.today(),
amount = invoice['amount_total'],
pay_journal_id=journal_id,
pay_account_id=invoice['account_id'],
))
wizard = Wizard.browse(cursor, uid, wizard_id)
wizard.unpay(dict(
model = 'account.invoice',
active_ids = [invoice_id],
))
def pay(self, cursor, uid, invoice_id, movelinename):
from addons.account.wizard.wizard_pay_invoice import _pay_and_reconcile as wizard_pay
Invoice = self.pool.get('account.invoice')
IrModelData = self.pool.get('ir.model.data')
pending = Invoice.read(cursor, uid, invoice_id, ['residual'])['residual']
model, journal_id = IrModelData.get_object_reference(
cursor, uid,
'som_generationkwh', 'genkwh_journal',
)
# The period
Period = self.pool.get('account.period')
from datetime import datetime
today = datetime.today()
period_name = today.strftime('%m/%Y')
period_id = Period.search(cursor, uid, [
('name', '=', period_name),
])[0]
wizard_pay(self, cursor, uid, data=dict(
id = invoice_id,
ids = [invoice_id],
form = dict(
amount=pending,
name=movelinename,
journal_id=journal_id,
period_id=period_id,
date="2017-08-03", # TODO: Magic date
),
), context={})
TesthelperPaymentWizard()
# vim: et ts=4 sw=4
| agpl-3.0 | 2,882,396,766,573,224,400 | 35.494681 | 104 | 0.585192 | false |
nicowollenzin/Raspi_GPIO_examles | LEDs/ampel.py | 1 | 1750 | ########################################################################
# ampel.py - Trafficlight simulation
#
# Copyright (C) 2013 Nico Wollenzin
#
# This file is part of Raspi_GPIO_Examples.
#
# ampel.py is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# ampel.py is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#
# You should have received a copy of the GNU General Public License
# along with ampel.py. If not, see <http://www.gnu.org/licenses/>.
########################################################################
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
GPIO.setup( 3,GPIO.IN)
GPIO.setup(11,GPIO.OUT)
GPIO.setup(13,GPIO.OUT)
GPIO.setup(15,GPIO.OUT)
status = "rot"
def init():
	GPIO.output(11,1) # red light
GPIO.output(13,0)
GPIO.output(15,0)
def set_gruen():
time.sleep(0.5)
GPIO.output(13,1)
time.sleep(1)
GPIO.output(11,0)
GPIO.output(13,0)
GPIO.output(15,1)
def set_rot():
time.sleep(0.5)
GPIO.output(15,0)
GPIO.output(13,1)
time.sleep(1)
GPIO.output(13,0)
GPIO.output(11,1)
try:
init()
while True:
if(GPIO.input(3) == False):
if(status == "rot"):
				print "Traffic light green"
status = "gruen"
set_gruen()
else:
if(status == "gruen"):
					print "Traffic light red"
status = "rot"
set_rot()
except KeyboardInterrupt:
GPIO.cleanup()
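# Wiring assumed by the pin setup above (BOARD numbering): pin 3 reads the
# push button, pin 11 drives the red LED, pin 13 the yellow LED and pin 15
# the green LED.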
| gpl-2.0 | 3,021,520,744,642,059,000 | 24.362319 | 72 | 0.600571 | false |
cmunk/protwis | construct/migrations/0002_auto_20180117_1457.py | 3 | 1640 | # Generated by Django 2.0.1 on 2018-01-17 13:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('ligand', '0001_initial'),
('construct', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='crystallizationligandconc',
name='ligand',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ligand.Ligand'),
),
migrations.AddField(
model_name='crystallizationligandconc',
name='ligand_role',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ligand.LigandRole'),
),
migrations.AddField(
model_name='crystallization',
name='chemical_lists',
field=models.ManyToManyField(to='construct.ChemicalList'),
),
migrations.AddField(
model_name='crystallization',
name='crystal_method',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='construct.CrystallizationMethods'),
),
migrations.AddField(
model_name='crystallization',
name='crystal_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='construct.CrystallizationTypes'),
),
migrations.AddField(
model_name='crystallization',
name='ligands',
field=models.ManyToManyField(to='construct.CrystallizationLigandConc'),
),
]
| apache-2.0 | -4,431,017,242,828,026,400 | 33.893617 | 131 | 0.615854 | false |
gditzler/Miscellaneous-IPython-Notebooks | src/bmu.py | 1 | 2924 | #!/usr/bin/env python
import json
import numpy
import scipy.sparse as sp
from optparse import OptionParser
__author__ = "Gregory Ditzler"
__copyright__ = "Copyright 2014, EESI Laboratory (Drexel University)"
__credits__ = ["Gregory Ditzler"]
__license__ = "GPL"
__version__ = "0.1.0"
__maintainer__ = "Gregory Ditzler"
__email__ = "[email protected]"
def load_biom(fname):
"""
load a biom file and return a dense matrix
:fname - string containing the path to the biom file
:data - numpy array containing the OTU matrix
:samples - list containing the sample IDs (important for knowing
the labels in the data matrix)
:features - list containing the feature names
"""
o = json.loads(open(fname,"U").read())
if o["matrix_type"] == "sparse":
data = load_sparse(o)
else:
data = load_dense(o)
samples = []
for sid in o["columns"]:
samples.append(sid["id"])
features = []
for sid in o["rows"]:
# check to see if the taxonomy is listed, this will generally lead to more
# descriptive names for the taxonomies.
if sid.has_key("metadata") and sid["metadata"] != None:
if sid["metadata"].has_key("taxonomy"):
#features.append(str( \
# sid["metadata"]["taxonomy"]).strip( \
# "[]").replace(",",";").replace("u'","").replace("'",""))
features.append(json.dumps(sid["metadata"]["taxonomy"]))
else:
features.append(sid["id"])
else:
features.append(sid["id"])
return data, samples, features
def load_dense(obj):
"""
load a biom file in dense format
:obj - json dictionary from biom file
:data - dense data matrix
"""
n_feat,n_sample = obj["shape"]
  data = numpy.array(obj["data"])
return data.transpose()
def load_sparse(obj):
"""
load a biom file in sparse format
:obj - json dictionary from biom file
:data - dense data matrix
"""
n_feat,n_sample = obj["shape"]
data = numpy.zeros((n_feat, n_sample))
for val in obj["data"]:
data[val[0], val[1]] = val[2]
data = data.transpose()
return data
def load_map(fname):
"""
  load a map file. this function does not have any dependencies on qiime's
  tools. the returned object is a dictionary of dictionaries. the dictionary
  is indexed by the sample_ID and there is an added field for the
  available meta-data. each element in the dictionary is a dictionary with
  the keys of the meta-data.
  :fname - string containing the map file path
  :meta_data - dictionary containing the mapping file information
"""
f = open(fname, "U")
mfile = []
for line in f:
mfile.append(line.replace("\n","").replace("#","").split("\t"))
meta_data_header = mfile.pop(0)
meta_data = {}
for sample in mfile:
sample_id = sample[0]
meta_data[sample_id] = {}
for identifier, value in map(None, meta_data_header, sample):
meta_data[sample_id][identifier] = value
return meta_data
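# Minimal usage sketch (file names and the metadata column are hypothetical):
#   data, samples, features = load_biom("otu_table.biom")
#   meta_data = load_map("map.txt")
#   labels = [meta_data[sample_id]["Treatment"] for sample_id in samples]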
| gpl-3.0 | 4,695,000,150,303,280,000 | 29.778947 | 79 | 0.645007 | false |
ekarlso/python-odlclient | odlclient/v2/ovsdb.py | 1 | 1844 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from odlclient.v2.base import Manager
class OvsdbManager(Manager):
base = 'ovsdb/nb/v2'
def list(self, node_type, node_id, table_name):
url = self._url('node', node_type, node_id,
'tables', table_name, 'rows')
return self._list(url, return_raw=True)
def create(self, node_type, node_id, table_name, data):
url = self._url('node', node_type, node_id,
'tables', table_name, 'rows')
return self._post(url, data, return_raw=True)
def get(self, node_type, node_id, table_name, row_uuid):
url = self._url('node', node_type, node_id,
'tables', table_name, 'rows', row_uuid)
return self._get(url, return_raw=True)
def delete(self, node_type, node_id, table_name, row_uuid):
url = self._url('node', node_type, node_id,
'tables', table_name, 'rows', row_uuid)
return self._delete(url)
def update(self, node_type, node_id, table_name, row_uuid):
url = self._url('node', node_type, node_id,
'tables', table_name, 'rows', row_uuid)
return self._put(url, return_raw=True)
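    # The helpers above build REST paths of the form (node id is made up):
    #   ovsdb/nb/v2/node/<node_type>/<node_id>/tables/<table_name>/rows[/<row_uuid>]
    # e.g. list('OVS', 'HOST:6640', 'Bridge') retrieves rows from
    #   ovsdb/nb/v2/node/OVS/HOST:6640/tables/Bridge/rows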
| apache-2.0 | 7,632,807,913,050,004,000 | 39.977778 | 75 | 0.631236 | false |
jbasko/configmanager | tests/test_hooks.py | 1 | 12419 | import pytest
from configmanager import Config, NotFound, Section, Item
from configmanager.utils import not_set
def sub_dict(dct, keys):
"""
Helper for tests that creates a dictionary from the given dictionary
with just the listed keys included.
"""
return {k: dct[k] for k in keys if k in dct}
def test_hooks_available_on_all_sections():
config = Config({
'uploads': {
'db': {
'user': 'root',
}
}
})
assert config.uploads.hooks
assert config.hooks
assert config.uploads.db.hooks
with pytest.raises(AttributeError):
_ = config.uploads.db.user.hooks
def test_not_found_hook():
calls = []
config = Config({
'uploads': Section()
})
@config.hooks.not_found
def first_hook(*args, **kwargs):
calls.append(('first', args, sub_dict(kwargs, ('section', 'name'))))
@config.hooks.not_found
def second_hook(*args, **kwargs):
calls.append(('second', args, sub_dict(kwargs, ('section', 'name'))))
assert len(calls) == 0
with pytest.raises(NotFound):
_ = config.db
assert len(calls) == 2
assert calls[0] == ('first', (), {'section': config, 'name': 'db'})
assert calls[1] == ('second', (), {'section': config, 'name': 'db'})
with pytest.raises(NotFound):
_ = config.uploads.threads
assert len(calls) == 4
assert calls[2] == ('first', (), {'section': config.uploads, 'name': 'threads'})
assert calls[3] == ('second', (), {'section': config.uploads, 'name': 'threads'})
# A hook that creates the missing item so further calls won't trigger
# the hook handlers again, including any subsequent hook handlers as part of current event.
@config.hooks.not_found
def third_hook(*args, **kwargs):
calls.append(('third', args, sub_dict(kwargs, ('section', 'name'))))
assert kwargs['section']
assert kwargs['name']
item = kwargs['section'].create_item(name=kwargs['name'])
kwargs['section'].add_item(item.name, item)
return item
# Fourth hook will never be called because the third hook already resolves the missing name
@config.hooks.not_found
def fourth_hook(*args, **kwargs):
calls.append(('fourth', args, sub_dict(kwargs, ('section', 'name'))))
assert len(calls) == 4
assert config.uploads.threads
assert len(calls) == 7
assert calls[4] == ('first', (), {'section': config.uploads, 'name': 'threads'})
assert calls[5] == ('second', (), {'section': config.uploads, 'name': 'threads'})
assert calls[6] == ('third', (), {'section': config.uploads, 'name': 'threads'})
assert config.uploads.threads
assert len(calls) == 7
def test_item_added_to_section_hook():
calls = []
config = Config({
'uploads': {
'db': {
'user': 'root',
}
}
})
@config.hooks.item_added_to_section
def item_added_to_section(*args, **kwargs):
calls.append(('first', args, sub_dict(kwargs, ('section', 'subject', 'alias'))))
@config.hooks.item_added_to_section
def item_added_to_section2(*args, **kwargs):
calls.append(('second', args, sub_dict(kwargs, ('section', 'subject', 'alias'))))
assert calls == []
# Adding a section to a section is unrelated
config.add_section('downloads', config.create_section())
assert calls == []
password = config.create_item(name='password')
threads = config.create_item(name='threads', default=5)
assert calls == []
config.uploads.db.add_item(password.name, password)
# Note that the item added to Config is actually a different instance to the one that was passed to add_item.
# This is because we do deepcopy in add_item.
assert calls == [
('first', (), {'section': config.uploads.db, 'subject': config.uploads.db.password, 'alias': 'password'}),
('second', (), {'section': config.uploads.db, 'subject': config.uploads.db.password, 'alias': 'password'}),
]
config.uploads.add_item('threads_alias', threads)
assert len(calls) == 4
assert calls[2:] == [
('first', (), {'section': config.uploads, 'subject': config.uploads.threads, 'alias': 'threads_alias'}),
('second', (), {'section': config.uploads, 'subject': config.uploads.threads, 'alias': 'threads_alias'}),
]
def test_callback_returning_something_cancels_parent_section_hook_handling():
config = Config({
'uploads': {
'db': {
'user': 'root',
}
}
})
calls = []
@config.hooks.item_added_to_section
def root_handler(**kwargs):
calls.append('root')
@config.uploads.hooks.item_added_to_section
def uploads_handler(**kwargs):
calls.append('uploads')
@config.uploads.db.hooks.item_added_to_section
def db_handler(**kwargs):
calls.append('db')
assert calls == []
config.uploads.db.add_item('password', config.create_item(name='password'))
assert calls == ['db', 'uploads', 'root']
@config.uploads.hooks.item_added_to_section
def another_uploads_handler(**kwargs):
calls.append('uploads2')
return True
# Root hooks are not handled because uploads2 returned something
config.uploads.db.add_item('host', config.create_item(name='host'))
assert len(calls) == 6
assert calls[-3:] == ['db', 'uploads', 'uploads2']
# Root hook is handled because the event happens on root level
config.add_item('greeting', config.create_item(name='greeting'))
assert len(calls) == 7
assert calls[-1:] == ['root']
def test_section_added_to_section_hook():
calls = []
config = Config({
'uploads': {
'db': {
'user': 'root',
}
}
})
@config.hooks.section_added_to_section
def section_added_to_section1(*args, **kwargs):
calls.append(('on_root', args, sub_dict(kwargs, ('section', 'subject', 'alias'))))
@config.uploads.hooks.section_added_to_section
def section_added_to_section2(*args, **kwargs):
calls.append(('on_uploads', args, sub_dict(kwargs, ('section', 'subject', 'alias'))))
assert calls == []
config.add_section('downloads', config.create_section())
assert len(calls) == 1
assert calls[-1:] == [
('on_root', (), {'subject': config.downloads, 'alias': 'downloads', 'section': config}),
]
config.uploads.db.add_section('backups', config.create_section())
assert len(calls) == 3
assert calls[-2:] == [
('on_uploads', (), {'subject': config.uploads.db.backups, 'alias': 'backups', 'section': config.uploads.db}),
('on_root', (), {'subject': config.uploads.db.backups, 'alias': 'backups', 'section': config.uploads.db}),
]
def test_item_value_changed_hook():
config = Config({
'uploads': {
'db': {
'user': 'root',
}
}
})
calls = []
@config.hooks.item_value_changed
def item_value_changed(old_value=None, new_value=None, item=None, **kwargs):
calls.append((item, old_value, new_value))
assert calls == []
config.reset()
assert calls == []
config.uploads.db.user.set('admin')
assert len(calls) == 1
assert calls[-1] == (config.uploads.db.user, not_set, 'admin')
config.uploads.db.user.value = 'Administrator'
assert len(calls) == 2
assert calls[-1] == (config.uploads.db.user, 'admin', 'Administrator')
config.load_values({'uploads': {'something_nonexistent': True}})
assert len(calls) == 2
config.load_values({'uploads': {'db': {'user': 'NEW DEFAULT'}}}, as_defaults=True)
assert len(calls) == 2
config.load_values({'uploads': {'db': {'user': 'NEW VALUE'}}})
assert len(calls) == 3
assert calls[-1] == (config.uploads.db.user, 'Administrator', 'NEW VALUE')
def test_item_value_changed_reports_not_set_as_old_value_if_there_was_no_value_before():
config = Config({'a': 'aaa'})
calls = []
def first(old_value, new_value):
assert old_value is not_set
assert new_value == 'bbb'
calls.append(1)
def second(old_value, new_value):
assert old_value == 'bbb'
assert new_value == 'aaa'
calls.append(2)
config.hooks.register_hook('item_value_changed', first)
config.a.value = 'bbb'
config.hooks.unregister_hook('item_value_changed', first)
config.hooks.register_hook('item_value_changed', second)
config.a.value = 'aaa'
config.hooks.unregister_hook('item_value_changed', second)
assert calls == [1, 2]
def test_item_value_changed_hook_called_on_item_reset():
config = Config({'a': 'aaa', 'b': 'bbb', 'c': Item()})
calls = []
@config.hooks.item_value_changed
def item_value_changed(item, old_value, new_value):
calls.append(item.name)
assert len(calls) == 0
config.reset()
assert len(calls) == 0
# Setting same value as default value triggers the event
config.a.value = 'aaa'
assert calls == ['a']
# Setting same value as the custom value before triggers the event
config.a.value = 'aaa'
assert calls == ['a', 'a']
# Actual reset
config.reset()
assert calls == ['a', 'a', 'a']
def test_item_value_changed_hook_not_called_when_resetting_a_not_set():
config = Config({'a': Item()})
@config.hooks.item_value_changed
def item_value_changed(item, old_value, new_value):
raise AssertionError('This should not have been called')
config.reset()
config.a.value = not_set
def test_hooks_arent_handled_if_hooks_enabled_setting_is_set_to_falsey_value():
config = Config({
'uploads': {
'db': {
'user': 'root'
}
}
})
calls = []
@config.hooks.item_value_changed
def item_value_changed(**kwargs):
calls.append(1)
config.uploads.db.user.value = 'admin1'
assert len(calls) == 1
config.uploads.db.user.value = 'admin2'
assert len(calls) == 2
config.settings.hooks_enabled = False
config.uploads.db.user.value = 'admin3'
assert len(calls) == 2
config.settings.hooks_enabled = None
config.uploads.db.user.value = 'admin4'
assert len(calls) == 2
config.settings.hooks_enabled = True
config.uploads.db.user.value = 'admin5'
assert len(calls) == 3
def test_hooks_work_across_nested_configs():
config = Config({
'a': Config({
'aa': Config({
'aaa': 'aaa-default',
}),
'ab': {
'aba': 'aba-default',
},
'ac': 'ac-default',
}),
'b': {
'ba': Config({
'baa': 'baa-default',
}),
'bb': {
'bba': 'bba-default',
},
'bc': 'bc-default',
},
'c': 'c-default',
})
calls = []
@config.hooks.item_value_changed
def item_value_changed(item):
calls.append(('root', '.'.join(item.get_path())))
assert len(calls) == 0
config.c.value = 'c-1'
assert len(calls) == 1
config.a.ac.value = 'ac-1'
assert len(calls) == 2
config.a.aa.aaa.value = 'aaa-1'
assert len(calls) == 3
config.a.ab.aba.value = 'aba-1'
assert len(calls) == 4
config.b.bc.value = 'bc-1'
assert len(calls) == 5
config.b.ba.baa.value = 'baa-1'
assert len(calls) == 6
config.b.bb.bba.value = 'bba-1'
assert len(calls) == 7
def test_not_found_hook_not_handled_if_contains_raises_not_found(simple_config):
calls = []
@simple_config.hooks.not_found
def not_found(**kwargs):
calls.append(kwargs)
assert len(calls) == 0
assert 'downloads' not in simple_config
assert len(calls) == 0
def test_not_found_hook_handled_in_iterators(simple_config):
calls = []
@simple_config.hooks.not_found
def not_found(**kwargs):
calls.append(kwargs)
assert len(calls) == 0
with pytest.raises(NotFound):
list(simple_config.iter_items(path='uploads.downloads.leftloads.rightloads', recursive=True))
assert len(calls) == 1
with pytest.raises(NotFound):
list(simple_config.iter_paths(path='uploads.downloads.leftloads.rightloads', recursive=True))
assert len(calls) == 2
| mit | 7,472,194,234,110,773,000 | 26.845291 | 117 | 0.58958 | false |
yannikbehr/spectroscopy | src/spectroscopy/class_factory.py | 1 | 32926 | """
Generate classes defined in the datamodel.
"""
import collections
from copy import deepcopy
import datetime
import hashlib
import inspect
from uuid import uuid4
import warnings
import weakref
import numpy as np
import tables
import spectroscopy.util
class ResourceIdentifier(object):
"""
Unique identifier of any resource so it can be referred to.
All elements of a Dataset instance have a unique id that other elements
use to refer to it. This is called a ResourceIdentifier.
In this class it can be any hashable object, e.g. most immutable objects
like numbers and strings.
:type id: str, optional
:param id: A unique identifier of the element it refers to. It is
        not verified that it actually is unique. The user has to take care of
that. If no resource_id is given, uuid.uuid4() will be used to
create one which assures uniqueness within one Python run.
If no fixed id is provided, the ID will be built from prefix
and a random uuid hash. The random hash part can be regenerated by the
referred object automatically if it gets changed.
:type prefix: str, optional
:param prefix: An optional identifier that will be put in front of any
automatically created resource id. The prefix will only have an effect
if `id` is not specified (for a fixed ID string). Makes automatically
generated resource ids more reasonable.
:type referred_object: Python object, optional
:param referred_object: The object this instance refers to. All instances
created with the same resource_id will be able to access the object as
long as at least one instance actual has a reference to it.
.. rubric:: General Usage
>>> ResourceIdentifier('2012-04-11--385392')
ResourceIdentifier(id="2012-04-11--385392")
>>> # If 'id' is not specified it will be generated automatically.
>>> ResourceIdentifier() # doctest: +ELLIPSIS
ResourceIdentifier(id="...")
>>> # Supplying a prefix will simply prefix the automatically generated ID
>>> ResourceIdentifier(prefix='peru09') # doctest: +ELLIPSIS
ResourceIdentifier(id="peru09_...")
ResourceIdentifiers can, and oftentimes should, carry a reference to the
object they refer to. This is a weak reference which means that if the
object gets deleted or runs out of scope, e.g. gets garbage collected, the
reference will cease to exist.
>>> class A(object): pass
>>> a = A()
>>> import sys
>>> ref_count = sys.getrefcount(a)
>>> res_id = ResourceIdentifier(referred_object=a)
>>> # The reference does not change the reference count of the object.
>>> print(ref_count == sys.getrefcount(a))
True
>>> # It actually is the same object.
>>> print(a is res_id.get_referred_object())
True
>>> # Deleting it, or letting the garbage collector handle the object will
>>> # invalidate the reference.
>>> del a
>>> print(res_id.get_referred_object())
None
The most powerful ability (and reason why one would want to use a resource
identifier class in the first place) is that once a ResourceIdentifier with
an attached referred object has been created, any other ResourceIdentifier
instances with the same ID can retrieve that object. This works
across all ResourceIdentifiers that have been instantiated within one
Python run.
This enables, e.g. the resource references between the different elements
to work in a rather natural way.
>>> a = A()
>>> obj_id = id(a)
>>> res_id = "someid"
>>> ref_a = ResourceIdentifier(res_id)
    >>> # The object it refers to cannot be found yet, because no instance with
    >>> # an attached object has been created so far.
>>> print(ref_a.get_referred_object())
None
>>> # This instance has an attached object.
>>> ref_b = ResourceIdentifier(res_id, referred_object=a)
>>> ref_c = ResourceIdentifier(res_id)
>>> # All ResourceIdentifiers will refer to the same object.
>>> assert(id(ref_a.get_referred_object()) == obj_id)
>>> assert(id(ref_b.get_referred_object()) == obj_id)
>>> assert(id(ref_c.get_referred_object()) == obj_id)
ResourceIdentifiers are considered identical if the IDs are
the same.
>>> # Create two different resource identifiers.
>>> res_id_1 = ResourceIdentifier()
>>> res_id_2 = ResourceIdentifier()
>>> assert(res_id_1 != res_id_2)
>>> # Equalize the IDs. NEVER do this. This is just an example.
>>> res_id_2.id = res_id_1.id = "smi:local/abcde"
>>> assert(res_id_1 == res_id_2)
ResourceIdentifier instances can be used as dictionary keys.
>>> dictionary = {}
>>> res_id = ResourceIdentifier(id="foo")
>>> dictionary[res_id] = "bar1"
>>> # The same ID can still be used as a key.
>>> dictionary["foo"] = "bar2"
>>> items = sorted(dictionary.items(), key=lambda kv: kv[1])
>>> for k, v in items: # doctest: +ELLIPSIS
    ...     print(repr(k), v)
ResourceIdentifier(id="foo") bar1
...'foo' bar2
"""
# Class (not instance) attribute that keeps track of all resource
# identifier throughout one Python run. Will only store weak references and
# therefore does not interfere with the garbage collection.
# DO NOT CHANGE THIS FROM OUTSIDE THE CLASS.
__resource_id_weak_dict = weakref.WeakValueDictionary()
# Use an additional dictionary to track all resource ids.
__resource_id_tracker = collections.defaultdict(int)
def __init__(self, oid=None, prefix=None,
referred_object=None):
# Create a resource id if None is given and possibly use a prefix.
if oid is None:
self.fixed = False
self._prefix = prefix
self._uuid = str(uuid4())
else:
self.fixed = True
self.id = oid
# Append the referred object in case one is given to the class level
# reference dictionary.
if referred_object is not None:
self.set_referred_object(referred_object)
# Increment the counter for the current resource id.
ResourceIdentifier.__resource_id_tracker[self.id] += 1
def __del__(self):
if self.id not in ResourceIdentifier.__resource_id_tracker:
return
# Decrement the resource id counter.
ResourceIdentifier.__resource_id_tracker[self.id] -= 1
# If below or equal to zero, delete it and also delete it from the weak
# value dictionary.
if ResourceIdentifier.__resource_id_tracker[self.id] <= 0:
del ResourceIdentifier.__resource_id_tracker[self.id]
try:
del ResourceIdentifier.__resource_id_weak_dict[self.id]
except KeyError:
pass
def get_referred_object(self):
"""
Returns the object associated with the resource identifier.
This works as long as at least one ResourceIdentifier with the same
ID as this instance has an associate object.
Will return None if no object could be found.
"""
try:
return ResourceIdentifier.__resource_id_weak_dict[self.id]
except KeyError:
return None
def set_referred_object(self, referred_object):
"""
Sets the object the ResourceIdentifier refers to.
        If it is already a weak reference it will be used, otherwise one will be
created. If the object is None, None will be set.
Will also append self again to the global class level reference list so
everything stays consistent.
"""
        # If it does not yet exist, simply set it.
if self.id not in ResourceIdentifier.__resource_id_weak_dict:
ResourceIdentifier.__resource_id_weak_dict[self.id] = \
referred_object
return
        # Otherwise check if the existing element is the same as the new one. If
        # it is, do nothing; otherwise raise a warning and set the new object as
        # the referred object.
if ResourceIdentifier.__resource_id_weak_dict[self.id] == \
referred_object:
return
msg = "The resource identifier '%s' already exists and points to " + \
"another object: '%s'." + \
"It will now point to the object referred to by the new " + \
"resource identifier."
msg = msg % (
self.id,
repr(ResourceIdentifier.__resource_id_weak_dict[self.id]))
# Always raise the warning!
warnings.warn_explicit(msg, UserWarning, __file__,
inspect.currentframe().f_back.f_lineno)
ResourceIdentifier.__resource_id_weak_dict[self.id] = \
referred_object
def copy(self):
"""
Returns a copy of the ResourceIdentifier.
>>> res_id = ResourceIdentifier()
>>> res_id_2 = res_id.copy()
>>> print(res_id is res_id_2)
False
>>> print(res_id == res_id_2)
True
"""
return deepcopy(self)
@property
def id(self):
"""
Unique identifier of the current instance.
"""
        if self.fixed:
            return self.__dict__.get("id")
        else:
            oid = self.prefix
            if oid is not None:
                if not oid.endswith("_"):
                    oid += "_"
                oid += self.uuid
                return oid
            return self.uuid
@id.deleter
def id(self):
msg = "The resource id cannot be deleted."
raise Exception(msg)
@id.setter
def id(self, value):
self.fixed = True
# XXX: no idea why I had to add bytes for PY2 here
if not isinstance(value, (str, bytes)):
msg = "attribute id needs to be a string."
raise TypeError(msg)
self.__dict__["id"] = value
@property
def prefix(self):
return self._prefix
@prefix.deleter
def prefix(self):
self._prefix = ""
@prefix.setter
def prefix(self, value):
if not isinstance(value, str):
msg = "prefix id needs to be a string."
raise TypeError(msg)
self._prefix = value
@property
def uuid(self):
return self._uuid
@uuid.deleter
def uuid(self):
"""
        Deleting the uuid hash is forbidden and will not work.
"""
msg = "The uuid cannot be deleted."
raise Exception(msg)
@uuid.setter
def uuid(self, value): # @UnusedVariable
"""
        Setting the uuid hash is forbidden and will not work.
"""
msg = "The uuid cannot be set manually."
raise Exception(msg)
@property
def resource_id(self):
return self.id
@resource_id.deleter
def resource_id(self):
del self.id
@resource_id.setter
def resource_id(self, value):
self.id = value
def __str__(self):
return self.id
def _repr_pretty_(self, p, cycle):
p.text(str(self))
def __repr__(self):
return 'ResourceIdentifier(id="%s")' % self.id
def __eq__(self, other):
if self.id == other:
return True
if not isinstance(other, ResourceIdentifier):
return False
if self.id == other.id:
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
"""
Uses the same hash as the resource id. This means that class instances
can be used in dictionaries and other hashed types.
        Both the object and its id can still be independently used as
dictionary keys.
"""
# "Salt" the hash with a string so the hash of the object and a
# string identical to the id can both be used as individual
# dictionary keys.
return hash("RESOURCE_ID") + self.id.__hash__()
def regenerate_uuid(self):
"""
Regenerates the uuid part of the ID. Does nothing for resource
identifiers with a user-set, fixed id.
"""
self._uuid = str(uuid4())
class RetVal(object):
"""
Wrapper to make tables.array.Array read only.
"""
def __init__(self, wrapped_object):
self.__dict__['_wrapped_object'] = wrapped_object
attributes = dir(wrapped_object)
for attr in attributes:
if hasattr(self, attr):
continue
self.__dict__[attr] = attr
def __setitem__(self, key, value):
raise AttributeError('Data type is read only.')
def __setslice__(self, i, j, value):
raise AttributeError('Data type is read only.')
def __setattr__(self, key, value):
raise AttributeError('Data type is read only.')
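    # All other attribute access is delegated to the wrapped pytables object,
    # so reads behave exactly like the original array while writes are blocked above.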
def __getattribute__(self, key):
if key in ['_wrapped_object', '__dict__', '__class__']:
return object.__getattribute__(self, key)
return getattr(self._wrapped_object, key)
def __getitem__(self, key):
return self._wrapped_object.__getitem__(key)
def __str__(self):
return self._wrapped_object.__str__()
class H5Set(set):
"""
An hdf5 set class for tags.
"""
def __init__(self, h5node):
self.h5node = h5node
# check for already existing tags e.g. when
# reading in a file
f = self.h5node._v_file
try:
for _t in f.root.tags._v_children:
ea = f.root.tags._v_children[_t]
entries = ea[np.where(ea[:] == self.h5node._v_name.encode())]
if len(entries) > 0:
super(H5Set, self).add(_t)
except (KeyError, tables.NoSuchNodeError):
pass
def add(self, val):
"""
Add an element to list of given tag.
"""
f = self.h5node._v_file
if val in self:
return
try:
super(H5Set, self).add(val)
except Exception as e:
print(val)
raise e
try:
ea = f.root.tags._v_children[val]
except (KeyError, tables.NoSuchNodeError):
msg = "Tag {:s} has not been registered yet. "
msg += "Use the 'Dataset.register_tags' function first."
raise ValueError(msg.format(val))
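        # Reuse an empty slot in the tag's element array if one is free;
        # otherwise append this node's name to the array.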
found = False
for i in range(ea.nrows):
if ea[i] == '':
ea[i] = np.array(
self.h5node._v_name, dtype='S60')
found = True
break
if not found:
ea.append(
np.array([self.h5node._v_name], dtype='S60'))
def append(self, val):
"""
Append an element to the list of given tag.
"""
self.add(val)
def remove(self, val):
"""
Remove element from list of given tag.
"""
f = self.h5node._v_file
super(H5Set, self).remove(val)
ea = f.root.tags._v_children[val]
ea[np.where(ea[:] == self.h5node._v_name.encode())] = np.array(
[''], dtype='S60')
if np.all(ea[:] == np.array('', dtype='S60')):
f.remove_node('/tags/' + val)
def pop(self):
val = set.pop(self)
self.remove(val)
return val
def discard(self, val):
try:
self.remove(val)
except KeyError:
pass
def clear(self):
while True:
try:
self.pop()
except:
break
def update(self, vals):
for v in vals:
self.add(v)
def difference_update(self, vals):
for v in vals:
self.discard(v)
def _buffer_property_factory(name, datatype, reference=False):
"""
Generate properties for a buffer class based on the datatype.
"""
# the private class attribute name
attr_name = '_'+name
def setter(self, value):
self.__dict__[attr_name] = value
fset = setter
def getter(self):
return self.__dict__[attr_name]
fget = getter
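    # Specialise the accessors below: ndarray properties, references to other
    # elements, datetimes and strings are converted to ascii byte strings
    # (or resource ids) because pytables cannot store them natively.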
if datatype[0] == np.ndarray:
if reference:
def set_reference_array(self, value):
if not isinstance(value, np.ndarray):
value = np.array(value, ndmin=1)
_t = []
for n in value:
if not isinstance(n, datatype[1]):
msg = "{:s} has to be of type: {}"
msg = msg.format(name, datatype[1])
raise ValueError(msg)
_t.append(str(getattr(n, '_resource_id')).encode('ascii'))
self.__dict__[attr_name] = np.array(_t)
fset = set_reference_array
elif datatype[1] == datetime.datetime:
# if the array contains datetime we need to convert
# it into ascii byte strings as pytables can't handle datetime
# objects
def set_datetime_array(self, value):
if not isinstance(value, np.ndarray):
value = np.array(value, ndmin=1).astype(np.str_)
_vals = []
for v in value:
_vals.append((spectroscopy.util
.parse_iso_8601(v)
.isoformat().encode('ascii')))
value = np.array(_vals)
self.__dict__[attr_name] = np.array(_vals)
fset = set_datetime_array
def get_datetime_array(self):
if self.__dict__[attr_name] is None:
return None
dts = self.__dict__[attr_name]
_vals = []
for _dt in dts:
_vals.append(_dt.decode('ascii'))
return np.array(_vals, dtype='datetime64[ms]')
fget = get_datetime_array
elif datatype[1] == np.str_:
# strings are encoded into ascii byte strings
# as this is how pytables stores strings
# internally
def set_string_array(self, value):
if not isinstance(value, np.ndarray):
value = np.array(value, ndmin=1).astype(np.str_)
_vals = []
for v in value:
_vals.append(v.encode('ascii'))
self.__dict__[attr_name] = np.array(_vals)
fset = set_string_array
def get_string_array(self):
if self.__dict__[attr_name] is None:
return None
value = self.__dict__[attr_name]
_vals = []
for v in value:
_vals.append(v.decode('ascii'))
return np.array(_vals)
fget = get_string_array
else:
def set_array(self, value):
self.__dict__[attr_name] = (np.array(value, ndmin=1).
astype(datatype[1]))
fset = set_array
else:
if reference:
def set_reference(self, value):
if value is not None:
if not isinstance(value, datatype[0]):
msg = "{:s} has to be of type: {}"
msg = msg.format(name, datatype[0])
raise ValueError(msg)
rid = str(getattr(value, '_resource_id')).encode('ascii')
else:
rid = None
self.__dict__[attr_name] = rid
fset = set_reference
elif datatype[0] == datetime.datetime:
def set_datetime(self, value):
value = (spectroscopy.util
.parse_iso_8601(value)
.isoformat())
self.__dict__[attr_name] = np.array(value.encode('ascii'))
fset = set_datetime
def get_datetime(self):
if self.__dict__[attr_name] is None:
return None
dt = self.__dict__[attr_name]
return dt.astype('datetime64[s]')
fget = get_datetime
elif datatype[0] == np.str_:
def set_string(self, value):
self.__dict__[attr_name] = value.encode('ascii')
fset = set_string
def get_string(self):
if self.__dict__[attr_name] is None:
return None
return self.__dict__[attr_name].decode('ascii')
fget = get_string
return property(fget=fget, fset=fset)
def _buffer_class_factory(class_name, class_properties=[],
class_references=[]):
"""
Class factory for buffer classes. These contain staged data, that
can then be written to the HDF5 file.
"""
cls_attrs = {}
_properties = []
# Assign class properties
for item in class_properties:
cls_attrs[item[0]] = _buffer_property_factory(item[0], item[1])
_properties.append(item[0])
cls_attrs['_properties'] = _properties
# Assign references to other elements in the datamodel
_references = []
for item in class_references:
cls_attrs[item[0]] = _buffer_property_factory(item[0], item[1],
reference=True)
_references.append(item[0])
cls_attrs['_references'] = _references
def __init__(self, **kwargs):
# Set all property values to None or the kwarg value.
for key in self._properties:
value = kwargs.pop(key, None)
setattr(self, key, value)
for key in self._references:
value = kwargs.pop(key, None)
setattr(self, key, value)
if len(list(kwargs.keys())) > 0:
msg = "The following names are not a "
msg += "property or reference of class {:s}: "
msg += ",".join(list(kwargs.keys()))
raise AttributeError(msg.format(type(self).__name__))
def __setattr__(self, key, value):
prop = getattr(self.__class__, key, None)
if isinstance(prop, property):
if value is None:
attr_name = '_'+key
self.__dict__[attr_name] = None
else:
prop.fset(self, value)
else:
raise AttributeError(
"%s is not an attribute or reference of class %s" %
(key, self.__class__.__name__))
def __str__(self):
return class_name.strip('_')
cls_attrs['__init__'] = __init__
cls_attrs['__setattr__'] = __setattr__
cls_attrs['__str__'] = __str__
return type(class_name, (object,), cls_attrs)
def _base_property_factory(name, datatype, reference=False):
"""
Generate properties for a base class based on the datatype.
"""
def getter(self):
try:
return self._root._v_attrs[name]
except KeyError:
return None
fget = getter
if datatype[0] == np.ndarray:
if reference:
def get_reference_array(self):
try:
value = self._root._v_attrs[name]
except KeyError:
return None
_t = []
for val in value:
_t.append((ResourceIdentifier(val.decode('ascii')).
get_referred_object()))
return _t
fget = get_reference_array
elif datatype[1] == datetime.datetime:
# if the array contains datetime we need to convert
# it into ascii byte strings as pytables can't handle datetime
# objects
def get_datetime_array(self):
try:
dt = getattr(self._root, name)[:]
return dt.astype('datetime64[ms]')
except tables.exceptions.NoSuchNodeError:
return None
fget = get_datetime_array
elif datatype[1] == np.str_:
# strings are encoded into ascii byte strings
# as this is how pytables stores strings
# internally
def get_string_array(self):
try:
return RetVal(getattr(self._root, name))
except tables.exceptions.NoSuchNodeError:
return None
fget = get_string_array
else:
def get_array(self):
try:
return RetVal(getattr(self._root, name))
except tables.exceptions.NoSuchNodeError:
return None
fget = get_array
else:
if reference:
def get_reference(self):
try:
value = self._root._v_attrs[name]
except KeyError:
return None
return (ResourceIdentifier(value.decode('ascii')).
get_referred_object())
fget = get_reference
elif datatype[0] == datetime.datetime:
def get_datetime(self):
try:
dt = self._root._v_attrs[name]
except KeyError:
return None
return dt.astype('datetime64[ms]')
fget = get_datetime
elif datatype[0] == np.str_:
def get_string(self):
try:
val = self._root._v_attrs[name]
except KeyError:
return None
return val.decode('ascii')
fget = get_string
elif name == 'tags':
def get_tags(self):
return self._tags
fget = get_tags
return property(fget=fget)
def _base_class_factory(class_name, class_type='base', class_properties=[],
class_references=[]):
"""
Class factory for base classes. These are thin wrappers for the
underlying HDF5 file with methods to write and retrieve data. Only
buffer class instances can be written to file and no data can be
changed once written. If a base class is extendable, it can be appended
to.
"""
cls_attrs = {}
_properties = {}
# Define class properties
for item in class_properties:
cls_attrs[item[0]] = _base_property_factory(item[0], item[1])
_properties[item[0]] = item[1]
cls_attrs['_properties'] = _properties
# Define references to other elements in the datamodel
_references = {}
for item in class_references:
cls_attrs[item[0]] = _base_property_factory(item[0], item[1],
reference=True)
_references[item[0]] = item[1]
cls_attrs['_references'] = _references
def __init__(self, h5node, data_buffer=None, pedantic=False):
# Set the parent HDF5 group after type checking
if type(h5node) is not tables.group.Group:
raise Exception("%s and %s are incompatible types." %
(type(h5node), tables.group.Group))
self.__dict__['_root'] = h5node
self.__dict__['_tags'] = H5Set(h5node)
# Every time a new object is created it gets a new resource ID
ri = ResourceIdentifier(oid=h5node._v_name, referred_object=self)
self.__dict__['_resource_id'] = ri
if not hasattr(h5node._v_attrs, 'creation_time'):
self.__dict__['creation_time'] = \
datetime.datetime.utcnow().isoformat()
h5node._v_attrs.creation_time = self.creation_time
else:
self.__dict__['creation_time'] = h5node._v_attrs.creation_time
if data_buffer is not None:
f = h5node._v_file
s = hashlib.sha224()
for key, prop_type in self._properties.items():
private_key = '_'+key
val = getattr(data_buffer, private_key)
if val is None:
continue
if key == 'tags':
for _v in val:
self._tags.add(_v)
continue
tohash = '{}'.format(val)
s.update(tohash.encode('ascii'))
if prop_type[0] == np.ndarray:
try:
shape = list(val.shape)
shape[0] = 0
at = tables.Atom.from_dtype(val.dtype)
vl = f.create_earray(h5node, key,
atom=at,
shape=tuple(shape))
except Exception as e:
print(val.dtype.type)
raise e
vl.append(val)
else:
h5node._v_attrs[key] = val
for key in self._references.keys():
private_key = '_'+key
val = getattr(data_buffer, private_key)
h5node._v_attrs[key] = val
# Add a hash column to be able to avoid adding the same
# entries more than once
h = s.digest()
ea = f.root.hash
if pedantic:
for i in range(ea.nrows):
if (h == ea[i]):
msg = "You can't add the same dataset "
msg += "more than once if 'pedantic=True'."
raise ValueError(msg)
ea.append(np.array([h], dtype='S28'))
def __str__(self):
return class_name.strip('_')
def __setattr__(self, name, value):
msg = '{} is read only.'
raise AttributeError(msg.format(self.__class__.__name__))
def __repr__(self):
msg = ''
msg += "ID: {:s}\n".format(self._root._v_name)
for key, datatype in list(self._properties.items()):
if key == 'tags':
continue
prop = getattr(self.__class__, key, None)
if isinstance(prop, property):
val = prop.fget(self)
if val is not None:
if datatype[0] == np.ndarray:
msg += "{0:s}: {1:}\n".format(key, val.shape)
else:
msg += "{0:s}: {1:}\n".format(key, val)
msg += ("Created at: {:s}\n"
.format(self._root._v_attrs.creation_time))
return msg
cls_attrs['__init__'] = __init__
cls_attrs['__str__'] = __str__
cls_attrs['__setattr__'] = __setattr__
cls_attrs['__repr__'] = __repr__
cls_attrs['__str__'] = __str__
if class_type == 'extendable':
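        # Extendable elements can grow their earray-backed properties after
        # creation; every append also records a modification time.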
def append(self, databuffer, pedantic=False):
s = hashlib.sha224()
for key, prop_type in self._properties.items():
private_key = '_'+key
val = getattr(databuffer, private_key)
if val is None:
continue
tohash = '{}'.format(val)
s.update(tohash.encode('ascii'))
if prop_type[0] != np.ndarray:
continue
vl = getattr(self._root, key)
vl.append(val)
h = s.digest()
f = self._root._v_file
ea = f.root.hash
if pedantic:
for i in range(ea.nrows):
if (h == ea[i]):
msg = "You can't add the same dataset "
msg += "more than once if 'pedantic=True'."
raise ValueError(msg)
ea.append(np.array([h], dtype='S28'))
self.__dict__['modification_time'] = \
datetime.datetime.utcnow().isoformat()
self._root._v_attrs.modification_time = self.modification_time
def __repr__(self):
msg = ''
msg += "ID: {:s}\n".format(self._root._v_name)
for key, datatype in list(self._properties.items()):
if key == 'tags':
continue
prop = getattr(self.__class__, key, None)
if isinstance(prop, property):
val = prop.fget(self)
if val is not None:
if datatype[0] == np.ndarray:
msg += "{0:s}: {1:}\n".format(key, val.shape)
else:
msg += "{0:s}: {1:}\n".format(key, val)
            ctime = self._root._v_attrs.creation_time
            mtime = getattr(self._root._v_attrs, 'modification_time',
                            ctime)
            msg += "Last modified at: {:s}\n".format(mtime)
            msg += "Created at: {:s}\n".format(ctime)
return msg
cls_attrs['append'] = append
cls_attrs['__repr__'] = __repr__
return type(class_name, (object,), cls_attrs)
| gpl-3.0 | -3,116,162,365,405,412,400 | 34.442411 | 79 | 0.53031 | false |
SCSSoftware/BlenderTools | addon/io_scs_tools/ui/workspace.py | 1 | 12526 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (C) 2013-2019: SCS Software
import os
import bpy
from bpy.types import Panel
from bl_ui.utils import PresetPanel
from io_scs_tools.utils import path as _path_utils
from io_scs_tools.utils import get_scs_globals as _get_scs_globals
from io_scs_tools.ui import shared as _shared
class _WorkspacePanelBlDefs:
"""
Defines class for showing in Blender Scene Properties window
"""
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Tool"
bl_ui_units_x = 15
layout = None # predefined Blender variable to avoid warnings in PyCharm
is_popover = None # predefined Blender variable to avoid warnings in PyCharm
@classmethod
def poll(cls, context):
return context.region.type in ('WINDOW', 'HEADER')
    def get_layout(self):
        """Return the layout this panel draws into. If drawn as a popover, wrap it in an extra box so the different sub-panels stay visually distinguishable."""
if self.is_popover:
layout = self.layout.box().column()
else:
layout = self.layout
return layout
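# The panels below are nested under the SCS Global Settings panel via bl_parent_id
# and are drawn in the 3D View sidebar ('Tool' tab).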
class SCS_TOOLS_PT_GlobalSettings(_shared.HeaderIconPanel, _WorkspacePanelBlDefs, Panel):
"""Draw global settings panel."""
bl_label = "SCS Global Settings"
def draw(self, context):
pass
class SCS_TOOLS_PT_PathSettingsPresets(PresetPanel, Panel):
bl_label = "SCS Tools Paths Presets"
preset_subdir = "io_scs_tools/paths"
preset_operator = "script.execute_preset"
preset_add_operator = "scene.scs_tools_add_path_preset"
class SCS_TOOLS_PT_PathSettings(_WorkspacePanelBlDefs, Panel):
"""Draw global path settings panel."""
bl_parent_id = SCS_TOOLS_PT_GlobalSettings.__name__
bl_label = "Path Settings"
def draw_header_preset(self, context):
SCS_TOOLS_PT_PathSettingsPresets.draw_panel_header(self.layout)
def draw(self, context):
layout = self.get_layout()
scs_globals = _get_scs_globals()
# scs tools main panel if config is being updated
layout.enabled = not scs_globals.config_update_lock
# SCS Project Path (DIR_PATH - absolute)
icon = 'SNAP_ON' if _get_scs_globals().use_alternative_bases else 'SNAP_OFF'
layout.label(text="SCS Project Base Path:", icon='FILE_FOLDER')
row = layout.row(align=True)
row.alert = not os.path.isdir(scs_globals.scs_project_path)
row.prop(scs_globals, 'scs_project_path', text="")
row.prop(scs_globals, 'use_alternative_bases', icon=icon, icon_only=True)
row.operator('scene.scs_tools_select_project_path', text="", icon='FILEBROWSER')
# Divide labels and sub paths to columns
sub_paths_layout = layout.row().split(factor=0.35)
sub_paths_left_col = sub_paths_layout.column(align=True)
sub_paths_right_col = sub_paths_layout.column(align=True)
# Trigger Actions File (FILE_PATH - relative)
icon = 'SNAP_ON' if _get_scs_globals().trigger_actions_use_infixed else 'SNAP_OFF'
sub_paths_left_col.label(text="Trigger Action Lib:")
sub_path_right_col_row = sub_paths_right_col.row(align=True)
sub_path_right_col_row.alert = not _path_utils.is_valid_trigger_actions_rel_path()
sub_path_right_col_row.prop(scs_globals, 'trigger_actions_rel_path', text="", icon='FILE_CACHE')
sub_path_right_col_row.prop(scs_globals, 'trigger_actions_use_infixed', icon=icon, icon_only=True)
sub_path_right_col_row.operator('scene.scs_tools_select_trigger_actions_lib_path', text="", icon='FILEBROWSER')
# Sign Library Directory (FILE_PATH - relative)
icon = 'SNAP_ON' if _get_scs_globals().sign_library_use_infixed else 'SNAP_OFF'
sub_paths_left_col.label(text="Sign Library:")
sub_path_right_col_row = sub_paths_right_col.row(align=True)
sub_path_right_col_row.alert = not _path_utils.is_valid_sign_library_rel_path()
sub_path_right_col_row.prop(scs_globals, 'sign_library_rel_path', text="", icon='FILE_CACHE')
sub_path_right_col_row.prop(scs_globals, 'sign_library_use_infixed', icon=icon, icon_only=True)
sub_path_right_col_row.operator('scene.scs_tools_select_sign_lib_path', text="", icon='FILEBROWSER')
# Traffic Semaphore Profile Library Directory (FILE_PATH - relative)
icon = 'SNAP_ON' if _get_scs_globals().tsem_library_use_infixed else 'SNAP_OFF'
sub_paths_left_col.label(text="Semaphore Lib:")
sub_path_right_col_row = sub_paths_right_col.row(align=True)
sub_path_right_col_row.alert = not _path_utils.is_valid_tsem_library_rel_path()
sub_path_right_col_row.prop(scs_globals, 'tsem_library_rel_path', text="", icon='FILE_CACHE')
sub_path_right_col_row.prop(scs_globals, 'tsem_library_use_infixed', icon=icon, icon_only=True)
sub_path_right_col_row.operator('scene.scs_tools_select_semaphore_lib_path', text="", icon='FILEBROWSER')
# Traffic Rules Library Directory (FILE_PATH - relative)
icon = 'SNAP_ON' if _get_scs_globals().traffic_rules_library_use_infixed else 'SNAP_OFF'
sub_paths_left_col.label(text="Traffic Rules Lib:")
sub_path_right_col_row = sub_paths_right_col.row(align=True)
sub_path_right_col_row.alert = not _path_utils.is_valid_traffic_rules_library_rel_path()
sub_path_right_col_row.prop(scs_globals, 'traffic_rules_library_rel_path', text="", icon='FILE_CACHE')
sub_path_right_col_row.prop(scs_globals, 'traffic_rules_library_use_infixed', icon=icon, icon_only=True)
sub_path_right_col_row.operator('scene.scs_tools_select_traffic_rules_lib_path', text="", icon='FILEBROWSER')
# Hookup Library Directory (DIR_PATH - relative)
sub_paths_left_col.label(text="Hookup Lib Dir:")
sub_path_right_col_row = sub_paths_right_col.row(align=True)
sub_path_right_col_row.alert = not _path_utils.is_valid_hookup_library_rel_path()
sub_path_right_col_row.prop(scs_globals, 'hookup_library_rel_path', text="", icon='FILE_FOLDER')
sub_path_right_col_row.operator('scene.scs_tools_select_hookup_lib_path', text="", icon='FILEBROWSER')
# Material Substance Library Directory (FILE_PATH - relative)
sub_paths_left_col.label(text="Mat Substance Lib:")
sub_path_right_col_row = sub_paths_right_col.row(align=True)
sub_path_right_col_row.alert = not _path_utils.is_valid_matsubs_library_rel_path()
sub_path_right_col_row.prop(scs_globals, 'matsubs_library_rel_path', text="", icon='FILE_CACHE')
sub_path_right_col_row.operator('scene.scs_tools_select_matsubs_lib_path', text="", icon='FILEBROWSER')
row = layout.row()
row.separator()
# Shader Presets File (FILE_PATH)
layout.label(text="Shader Presets Library:", icon='FILE_TEXT')
row = layout.row(align=True)
row.prop(scs_globals, 'shader_presets_use_custom', text="")
custom_path_row = row.row(align=True)
custom_path_row.enabled = scs_globals.shader_presets_use_custom
custom_path_row.alert = not _path_utils.is_valid_shader_presets_library_path()
custom_path_row.prop(scs_globals, 'shader_presets_filepath', text="")
custom_path_row.operator('scene.scs_tools_select_shader_presets_path', text="", icon='FILEBROWSER')
class SCS_TOOLS_PT_DisplaySettings(_WorkspacePanelBlDefs, Panel):
"""Draw global display settings panel."""
bl_parent_id = SCS_TOOLS_PT_GlobalSettings.__name__
bl_label = "Display Settings"
def draw(self, context):
layout = self.get_layout()
scs_globals = _get_scs_globals()
layout.use_property_split = True
layout.use_property_decorate = False
# scs tools main panel if config is being updated
layout.enabled = not scs_globals.config_update_lock
layout.prop(scs_globals, 'drawing_mode', expand=True)
layout.prop(scs_globals, 'icon_theme')
layout.prop(scs_globals, 'display_info')
row = _shared.create_row(layout, use_split=True, align=True, enabled=scs_globals.display_info != "none")
row.prop(scs_globals, 'info_text_color')
layout.prop(scs_globals, 'base_paint_color')
layout.prop(scs_globals, 'show_preview_models')
class SCS_TOOLS_PT_LocatorsDisplay(_WorkspacePanelBlDefs, Panel):
"""Draw locators display panel."""
bl_parent_id = SCS_TOOLS_PT_DisplaySettings.__name__
bl_label = "Locators Display"
bl_options = {'DEFAULT_CLOSED'}
def draw_header(self, context):
layout = self.layout
scs_globals = _get_scs_globals()
layout.enabled = not scs_globals.config_update_lock
layout.prop(scs_globals, 'display_locators', text="")
def draw(self, context):
layout = self.get_layout()
scs_globals = _get_scs_globals()
# scs tools main panel if config is being updated
layout.enabled = not scs_globals.config_update_lock
layout.use_property_split = True
layout.use_property_decorate = False
layout.enabled = scs_globals.display_locators and not scs_globals.config_update_lock
layout.prop(scs_globals, 'locator_size')
layout.prop(scs_globals, 'locator_empty_size')
layout.prop(scs_globals, 'locator_prefab_wire_color')
layout.prop(scs_globals, 'locator_model_wire_color')
layout.prop(scs_globals, 'locator_coll_wire_color')
layout.prop(scs_globals, 'locator_coll_face_color')
class SCS_TOOLS_PT_ConnectionsDisplay(_WorkspacePanelBlDefs, Panel):
"""Draw connections display panel."""
bl_parent_id = SCS_TOOLS_PT_DisplaySettings.__name__
bl_label = "Connections Display"
bl_options = {'DEFAULT_CLOSED'}
def draw_header(self, context):
layout = self.layout
scs_globals = _get_scs_globals()
layout.enabled = not scs_globals.config_update_lock
layout.prop(scs_globals, 'display_connections', text="")
def draw(self, context):
layout = self.get_layout()
scs_globals = _get_scs_globals()
layout.use_property_split = True
layout.use_property_decorate = False
layout.enabled = scs_globals.display_connections and not scs_globals.config_update_lock
layout.prop(scs_globals, 'optimized_connections_drawing')
layout.prop(scs_globals, 'curve_segments')
layout.prop(scs_globals, 'np_connection_base_color')
layout.prop(scs_globals, 'mp_connection_base_color')
layout.prop(scs_globals, 'tp_connection_base_color')
class SCS_TOOLS_PT_OtherSetttings(_WorkspacePanelBlDefs, Panel):
"""Draw global settings panel."""
bl_parent_id = SCS_TOOLS_PT_GlobalSettings.__name__
bl_label = "Other Settings"
def draw_header(self, context):
pass # disable custom icon
def draw(self, context):
"""Draw global settings panel."""
layout = self.get_layout()
scs_globals = _get_scs_globals()
# scs tools main panel if config is being updated
layout.enabled = not scs_globals.config_update_lock
_shared.draw_common_settings(layout, without_box=True)
classes = (
SCS_TOOLS_PT_GlobalSettings,
SCS_TOOLS_PT_PathSettingsPresets,
SCS_TOOLS_PT_PathSettings,
SCS_TOOLS_PT_DisplaySettings,
SCS_TOOLS_PT_LocatorsDisplay,
SCS_TOOLS_PT_ConnectionsDisplay,
SCS_TOOLS_PT_OtherSetttings,
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
from io_scs_tools import SCS_TOOLS_MT_MainMenu
SCS_TOOLS_MT_MainMenu.append_props_entry("Workspace Properties", SCS_TOOLS_PT_GlobalSettings.__name__)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
| gpl-2.0 | 2,947,348,342,683,345,400 | 41.461017 | 145 | 0.674357 | false |
Signiant/aws-ip-list-service | app/views.py | 1 | 20247 | from . import awslib
from app import app
from flask import render_template
from flask import send_from_directory
import json
import yaml
from json import dumps
from os.path import join
from flask import make_response, request, redirect, url_for
import os
import time
import traceback
BUCKET_NAME = os.environ.get('IPLIST_CONFIG_BUCKET')
S3PATH = os.environ.get('IPLIST_CONFIG_PATH')
NOHTTPS = os.environ.get('NOHTTPS')
PATH = join('iplist_config', 'config')
if S3PATH is None:
    print("IPLIST_CONFIG_PATH environment variable not set")
elif BUCKET_NAME is None:
print("No bucket name specified")
else:
awslib.get_file(BUCKET_NAME, S3PATH, PATH)
#####
# Caching parameters
#####
CACHE_TIMEOUT_PERIOD_IN_SECONDS = 300
CACHE_ROOT_DIRECTORY = "/ip-range-cache"
try:
os.makedirs(CACHE_ROOT_DIRECTORY)
except:
pass
@app.route('/')
def handle_index():
redirect_to_https = None
    if NOHTTPS is None:
        proto = request.headers.get("X-Forwarded-Proto")
        if not proto == "https":
            redirect_to_https = _check_ssl(request.url)
    if redirect_to_https is not None:
return redirect_to_https
with open(PATH) as config_data:
# This should handle json or yaml
data = yaml.safe_load(config_data)
app_data = []
hidden_apps = []
alt_apps = []
for app in data['apps']:
# altname list for deprecated url links
if app.get('altname'):
app_info = {}
app_info['name'] = app['altname']
app_info['additionalText'] = ''
alt_apps.append(app_info)
if app.get('hidden'):
print('Found hidden app: %s' % app['name'])
app_info = {}
app_info['name'] = app['name']
app_info['additionalText'] = ''
if app.get('additionalText'):
app_info['additionalText'] = app['additionalText']
hidden_apps.append(app_info)
else:
app_info = {}
app_info['name'] = app['name']
app_info['additionalText'] = ''
if app.get('additionalText'):
app_info['additionalText'] = app['additionalText']
app_data.append(app_info)
return render_template("index.html", apps=app_data, altapps=alt_apps, hidden=hidden_apps)
@app.route('/healthcheck')
def handle_healthcheck():
print("I'm still here. test")
return render_template("healthcheck.html")
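# A cache file stores its write timestamp on the first line and the str()-serialized
# result dict on the second; entries older than CACHE_TIMEOUT_PERIOD_IN_SECONDS are rebuilt.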
def _read_from_cache(app_cache_file):
read_from_cache = True
try:
print(app_cache_file)
with open(app_cache_file, "r") as cache:
cache_time = float(cache.readline().strip())
current_time = time.time()
if (current_time - cache_time) > CACHE_TIMEOUT_PERIOD_IN_SECONDS:
read_from_cache = False
except IOError:
read_from_cache = False
return read_from_cache
@app.route('/all')
def handle_all_app():
with open(PATH) as config_data:
# This should handle json or yaml
data = yaml.safe_load(config_data)
app_name_list = []
for app in data['apps']:
app_name_list.append(app['name'])
output = ""
all_list = {}
for app_name in app_name_list:
verbose = False
chosen_region = None
query_string = request.query_string
modified_date = None
if not query_string == "":
for query in query_string.split(b'&'):
if b'verbose' in query.lower():
if query.endswith(b'1'):
verbose = True
elif b'region' in query.lower():
chosen_region = query[7:].decode("utf-8")
suffix = ".json"
if verbose:
suffix = ".verbose" + suffix
if chosen_region:
suffix = "." + chosen_region + suffix
app_cache_file = os.path.join(CACHE_ROOT_DIRECTORY, app_name.lower() + suffix)
app_cache_file = parse_data_from_file(app_name, chosen_region, app_cache_file, data, verbose)
with open(app_cache_file, "r") as cache:
# read the first line as cache time
cache_time = cache.readline()
line = cache.readline()
all_list[app_name] = eval(line)
output = output + line
return jsonify(**all_list)
@app.route('/<appname>')
def handle_app(appname):
verbose = False
chosen_region = None
query_string = request.query_string
modified_date = None
if not query_string == "":
for query in query_string.split(b'&'):
if b'verbose' in query.lower():
if query.endswith(b'1'):
verbose = True
elif b'region' in query.lower():
chosen_region = query[7:].decode("utf-8")
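    # The cache file name encodes the verbose flag and region so each query
    # variant gets its own cache entry.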
suffix = ".json"
if verbose:
suffix = ".verbose" + suffix
if chosen_region:
suffix = "." + chosen_region + suffix
app_cache_file = os.path.join(CACHE_ROOT_DIRECTORY, appname.lower() + suffix)
if _read_from_cache(app_cache_file):
print("Reading cached data for this request.")
else:
print("Cache is out of date. Refreshing for this request.")
app_cache_file = parse_data_from_file(appname, chosen_region, app_cache_file, verbose=verbose)
with open(app_cache_file, "r") as cache:
# read the first line as cache time
cache_time = cache.readline()
line = cache.readline()
return jsonify(**eval(line))
@app.route('/service-list')
def handle_service_list():
verbose = False
chosen_service = None
query_string = request.query_string
if not query_string == "":
for query in query_string.split(b'&'):
if b'verbose' in query.lower():
if query.endswith(b'1'):
verbose = True
elif b'service' in query.lower():
chosen_service = query[8:].decode("utf-8")
suffix = ".json"
if verbose:
suffix = ".verbose" + suffix
if chosen_service:
suffix = "." + chosen_service + suffix
print("Getting service list")
cache_file = os.path.join(CACHE_ROOT_DIRECTORY, 'service-list' + suffix)
if _read_from_cache(cache_file):
print("Reading cached data for this request.")
else:
print("Cache is out of date. Refreshing for this request.")
try:
with open(PATH) as config_data:
# This should handle json or yaml
data = yaml.safe_load(config_data)
if verbose:
print(request.url)
redirect_to_https = None
if NOHTTPS is None:
proto = request.headers.get("X-Forwarded-Proto")
if not proto == "https":
redirect_to_https = _check_ssl(request.url, verbose)
if redirect_to_https is not None:
return redirect_to_https
ret = {}
for app in data['apps']:
display = app.get('service_list')
if not display:
# skip this
continue
app_name = app.get('name')
# only run next section if specific service NOT chosen
if chosen_service:
if app_name != chosen_service:
continue
app_config = app.get('config')
region_list = []
for config in app_config:
if config.get('R53'):
region_list = []
for config_item in config['R53']:
if 'Regions' in config_item:
for item in config_item['Regions']:
region_name = item.get('Name')
if region_name not in region_list:
region_list.append(region_name)
elif config.get('S3'):
for item in config['S3']:
region_name = item.get('Name')
if region_name not in region_list:
region_list.append(region_name)
else:
region_name = config.get('region')
if region_name not in region_list:
region_list.append(region_name)
ret[app_name] = region_list
if not ret:
return redirect(url_for('handle_index'), code=302)
else:
_write_cache(cache_file, ret)
except:
print("Error: Unable to load new information")
traceback.print_exc()
with open(cache_file, "r") as cache:
# read the first line as cache time
cache_time = cache.readline()
line = cache.readline()
return jsonify(**eval(line))
def ip_list_sort(ret):
"""
sort ips in the nested dict list
:param ret:
:return:
"""
for region in ret:
for ip_list in ret[region]:
if ip_list == "all_ips":
# Remove any duplicates
ret[region][ip_list] = list(set(ret[region][ip_list]))
ret[region][ip_list].sort()
return ret
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'), 'favicon.ico',
mimetype='image/vnd.microsoft.icon')
def jsonify(status=200, indent=4, sort_keys=False, **kwargs):
response = make_response(dumps(dict(**kwargs), indent=indent, sort_keys=sort_keys))
response.headers['Content-Type'] = 'application/json; charset=utf-8'
response.headers['mimetype'] = 'application/json'
    response.status_code = status
return response
def _check_ssl(url, verbose=False):
if verbose:
print("Current scheme: %s" % url[:5])
if url[:5] == "https":
return None
else:
return redirect("https" + url[4:], code=302)
def _write_cache(app_cache_file, data):
with open(app_cache_file, "w+") as cache:
cache.write(str(time.time()))
cache.write("\n")
cache.write(str(data))
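# Build the per-region IP lists for an app from the iplist config (static S3 objects,
# Route53 record sets, or live ELB/EIP/instance lookups) and refresh its cache file.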
def parse_data_from_file(app_name, chosen_region, app_cache_file, data=None, verbose=False):
try:
ret = {}
if not data:
with open(PATH) as config_data:
# This should handle json or yaml
data = yaml.safe_load(config_data)
if verbose:
print(request.url)
redirect_to_https = None
if NOHTTPS is None:
proto = request.headers.get("X-Forwarded-Proto")
if not proto == "https":
redirect_to_https = _check_ssl(request.url, verbose)
if redirect_to_https:
return redirect_to_https
for app in data['apps']:
# create url link for both name and alternative name for ip-range apps
if app_name.lower() == app['name'].lower() or app_name.lower() == str(app.get('altname')).lower():
app_config = app['config']
for config in app_config:
if config.get('s3filepath'):
datapath = config.get('localpath')
awslib.get_file(BUCKET_NAME, config['s3filepath'], datapath)
with open(datapath) as filedata:
output = json.load(filedata)
break
elif config.get('R53'):
for config_item in config['R53']:
ret = {}
# Get all records for the given domain
zone_id = config_item['HostedZoneId']
print(f'Getting all records for Zone with ID {zone_id}')
all_zone_records = awslib.get_zone_records(zone_id)
ip_inclusions = []
inclusions = config_item.get('inclusions')
if inclusions:
print('Getting inclusions from config')
if 'dns_list' in inclusions:
for dns in inclusions['dns_list']:
dns_ips = awslib.list_balancer_ips(dns)
ip_inclusions.extend(dns_ips)
if 'ip_list' in inclusions:
ip_inclusions.extend(inclusions['ip_list'])
for item in config_item['Regions']:
print('Getting records for %s' % item['Name'])
ret[item['Name']] = {}
ret[item['Name']]['all_ips'] = []
ret[item['Name']]['all_ips'] = awslib.get_matching_records(all_zone_records,
item['Pattern'])
if 'last_modified' in item:
modified_date = str(item['last_modified'])
ret[item['Name']]['last_modified'] = modified_date
if len(ip_inclusions) > 0:
ret[item['Name']]['all_ips'].extend(ip_inclusions)
break
elif config.get('S3'):
ret = {}
for item in config['S3']:
print('Getting records for %s' % item['Name'])
bucket_name = item.get('bucket')
object_path = item.get('objectpath')
region = item.get('region')
ret[item['Name']] = {}
ret[item['Name']]['all_ips'] = []
if 'last_modified' in item:
modified_date = str(item['last_modified'])
                                ret[item['Name']]['last_modified'] = modified_date
if bucket_name and object_path and region:
file_contents = awslib.get_file_contents(bucket_name, object_path)
region_data = file_contents.get(region)
ret[item['Name']]['all_ips'] = region_data
inclusions = item.get('inclusions')
if inclusions:
print('Adding inclusions from config')
if 'dns_list' in inclusions:
for dns in inclusions['dns_list']:
dns_ips = awslib.list_balancer_ips(dns)
ret[item['Name']]['all_ips'].extend(dns_ips)
if 'ip_list' in inclusions:
ret[item['Name']]['all_ips'].extend(inclusions['ip_list'])
break
else:
region = config['region']
# only run next section if region equal chosen_region
if chosen_region:
if chosen_region != region:
continue
dnsname = config.get('dnsname')
inclusions = config.get('inclusions')
exclusions = config.get('exclusions')
eip_check = config.get('show_eip')
lb_check = config.get('show_lb_ip')
inst_check = config.get('show_inst_ip')
modified_date = config.get('last_modified')
if not ret.get(region):
ret[region] = {}
if 'last_modified' in config:
modified_date = str(config.get('last_modified'))
ret[region]['last_modified'] = modified_date
if not ret[region].get('all_ips'):
ret[region]['all_ips'] = []
if eip_check:
eips = awslib.list_eips(region, filter=exclusions)
# verbose only makes sense if we're not getting ALL EIPs
if verbose:
if not ret[region].get('eips'):
ret[region]['eips'] = eips
else:
ret[region]['eips'].extend(eips)
if eip_check:
ret[region]['all_ips'].extend(eips)
if lb_check:
elb = awslib.list_balancer_ips(dnsname)
if verbose:
if not ret[region].get('elb'):
ret[region]['elb'] = elb
else:
ret[region]['elb'].extend(elb)
if lb_check:
ret[region]['all_ips'].extend(elb)
if inst_check:
lb_names = config.get('lb_names')
lb_name = None
if not lb_names:
lb_name = awslib.get_active_balancer(dnsname, region)
if not lb_name and not lb_names:
print('ERROR: Unable to determine LB name(s) - cannot get instance IPs')
else:
if not lb_names:
lb_names = [lb_name]
for lb in lb_names:
inst_ips = awslib.list_instance_ips(lb.lower(), region)
if verbose:
if not ret[region].get('instance_ips'):
ret[region]['instance_ips'] = inst_ips
else:
ret[region]['instance_ips'].extend(inst_ips)
if inst_check:
ret[region]['all_ips'].extend(inst_ips)
if inclusions:
print('Adding inclusions from config')
if 'dns_list' in inclusions:
for dns in inclusions['dns_list']:
dns_ips = awslib.list_balancer_ips(dns)
if verbose:
if not ret[region].get('inclusions'):
ret[region]['inclusions'] = dns_ips
else:
ret[region]['inclusions'].extend(dns_ips)
ret[region]['all_ips'].extend(dns_ips)
if 'ip_list' in inclusions:
if verbose:
if not ret[region].get('inclusions'):
ret[region]['inclusions'] = inclusions['ip_list']
else:
ret[region]['inclusions'].extend(inclusions['ip_list'])
ret[region]['all_ips'].extend(inclusions['ip_list'])
if not ret:
return redirect(url_for('handle_index'), code=302)
else:
# sort ip list in ret when it can
ret = ip_list_sort(ret)
_write_cache(app_cache_file, ret)
except:
print("Error: Unable to load new information for app: " + str(app_name))
traceback.print_exc()
return app_cache_file
| mit | -8,513,882,742,004,758,000 | 38.467836 | 110 | 0.466983 | false |
ALPSCore/CT-HYB | tutorials/tutorial0/plot.py | 1 | 3599 | import numpy as np
import matplotlib.pyplot as plt
import pylab
import h5py
def read_param(h5, name):
if '/parameters/dictionary/'+name in h5:
return h5['/parameters/dictionary/'+name].value
elif '/parameters/'+name in h5:
return h5['/parameters/'+name].value
else:
raise RuntimeError("Parameter "+ name + " not found")
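# Transformation matrix from the Legendre to the Matsubara basis:
# T_{nl} = (-1)^n * i^(l+1) * sqrt(2l+1) * j_l((n+1/2)*pi), with j_l the spherical Bessel function.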
def compute_Tnl(n_matsubara, n_legendre):
Tnl = np.zeros((n_matsubara, n_legendre), dtype=complex)
for n in xrange(n_matsubara):
        sph_jn = np.array([scipy.special.spherical_jn(l, (n+0.5)*np.pi) for l in range(n_legendre)])
for il in xrange(n_legendre):
Tnl[n,il] = ((-1)**n) * ((1J)**(il+1)) * np.sqrt(2*il + 1.0) * sph_jn[il]
return Tnl
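# Read beta, the number of sites, G(tau), G(i*omega_n) and the Monte Carlo sign
# from a CT-HYB *.out.h5 result file.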
def read_h5(p):
r = {}
print p+'.out.h5'
h5 = h5py.File(p+'.out.h5','r')
r["SITES"] = read_param(h5, 'model.sites')
r["BETA"] = read_param(h5, 'model.beta')
def load_g(path):
N = h5[path].shape[0]
M = h5[path].shape[1]
data = h5[path].value.reshape(N,M,M,2)
return data[:,:,:,0] + 1J*data[:,:,:,1]
r["Gtau"] = load_g('/gtau/data')
r["Gomega"] = load_g('/gf/data')
r["Sign"] = h5['/simulation/results/Sign/mean/value'].value
r["Sign_count"] = h5['/simulation/results/Sign/count'].value
return r
# input: G2 in the mix basis
# return G2 in the Matsubara freq. domain: (i,j,k,l, fermionic freq, fermionic freq, bosnic freq)
def compute_G2_matsubara(g2_l, niw_f):
nl_G2 = g2_l.shape[4]
Tnl_G2 = compute_Tnl(niw_f, nl_G2)
tmp = np.tensordot(g2_l, Tnl_G2.conjugate(), axes=(5,1))
return np.tensordot(Tnl_G2, tmp, axes=(1,4)).transpose((1,2,3,4,0,6,5))
prefix_list = ['input']
result_list = []
for p in prefix_list:
result_list.append(read_h5(p))
color_list = ['r', 'g', 'b', 'y', 'k', 'm']
params = {
'backend': 'ps',
'axes.labelsize': 24,
'text.fontsize': 24,
'legend.fontsize': 18,
'xtick.labelsize': 24,
'ytick.labelsize': 24,
'text.usetex': True,
}
pylab.rcParams.update(params)
plt.figure(1,figsize=(8,8))
plt.subplot(211)
plt.xlabel(r'$\tau/\beta$', fontname='serif')
plt.ylabel(r'$-\mathrm{Re}G(\tau)$', fontname='serif')
plt.yscale('log')
plt.subplot(212)
plt.xlabel(r'$\omega_n$', fontname='serif')
plt.ylabel(r'$-\mathrm{Im}G(i\omega_n)$', fontname='serif')
plt.xscale('log')
plt.yscale('log')
for i in range(len(result_list)):
norb = result_list[i]["SITES"]
beta = result_list[i]["BETA"]
nf = norb*2
sign = result_list[i]["Sign"]
gtau = result_list[i]["Gtau"]
giw = result_list[i]["Gomega"]
print "The number of measurements is ", result_list[i]["Sign_count"]
tau_point = np.linspace(0.0, 1.0, gtau.shape[0])
plt.subplot(211)
for i_f in range(nf):
plt.plot(tau_point, -gtau[:,i_f,i_f].real, color=color_list[i_f], marker='', label='flavor'+str(i_f), ls='--', markersize=0)
print "flavor ", i_f, "G(tau=0) = ", gtau[0,i_f,i_f].real , "G(tau=beta) = ", gtau[-1,i_f,i_f].real
omega_point = np.array([(2*im+1)*np.pi/beta for im in xrange(giw.shape[0])])
plt.subplot(212)
for i_f in range(nf):
plt.plot(omega_point, -giw[:,i_f,i_f].imag, color=color_list[i_f], marker='', label='flavor'+str(i_f), ls='--', markersize=0)
plt.plot(omega_point, 1/omega_point, color='k', label=r'$1/\omega_n$', ls='-')
plt.subplot(211)
plt.legend(loc='best',shadow=True,frameon=False,prop={'size' : 12})
plt.subplot(212)
plt.legend(loc='best',shadow=True,frameon=False,prop={'size' : 12})
plt.tight_layout()
plt.savefig("GF.eps")
plt.close(1)
| gpl-2.0 | 8,024,045,240,136,776,000 | 30.295652 | 133 | 0.601 | false |
egel/blog | fabfile.py | 1 | 2466 | from fabric.api import *
import fabric.contrib.project as project
import os
import shutil
import sys
import SocketServer
from pelican.server import ComplexHTTPRequestHandler
# Local path configuration (can be absolute or relative to fabfile)
env.deploy_path = 'output'
DEPLOY_PATH = env.deploy_path
# Remote server configuration
production = 'root@localhost:22'
dest_path = 'n'
# Rackspace Cloud Files configuration settings
env.cloudfiles_username = 'my_rackspace_username'
env.cloudfiles_api_key = 'my_rackspace_api_key'
env.cloudfiles_container = 'my_cloudfiles_container'
# Github Pages configuration
env.github_pages_branch = "gh-pages"
# Port for `serve`
PORT = 8000
def clean():
"""Remove generated files"""
if os.path.isdir(DEPLOY_PATH):
shutil.rmtree(DEPLOY_PATH)
os.makedirs(DEPLOY_PATH)
def build():
"""Build local version of site"""
local('pelican -s pelicanconf.py')
def rebuild():
"""`clean` then `build`"""
clean()
build()
def regenerate():
"""Automatically regenerate site upon file modification"""
local('pelican -r -s pelicanconf.py')
def serve():
"""Serve site at http://localhost:8000/"""
os.chdir(env.deploy_path)
class AddressReuseTCPServer(SocketServer.TCPServer):
allow_reuse_address = True
server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler)
sys.stderr.write('Serving on port {0} ...\n'.format(PORT))
server.serve_forever()
def reserve():
"""`build`, then `serve`"""
build()
serve()
def preview():
"""Build production version of site"""
local('pelican -s publishconf.py')
def cf_upload():
"""Publish to Rackspace Cloud Files"""
rebuild()
with lcd(DEPLOY_PATH):
local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 '
'-U {cloudfiles_username} '
'-K {cloudfiles_api_key} '
'upload -c {cloudfiles_container} .'.format(**env))
@hosts(production)
def publish():
"""Publish to production via rsync"""
local('pelican -s publishconf.py')
project.rsync_project(
remote_dir=dest_path,
exclude=".DS_Store",
local_dir=DEPLOY_PATH.rstrip('/') + '/',
delete=True,
extra_opts='-c',
)
def gh_pages():
"""Publish to GitHub Pages"""
rebuild()
local("ghp-import -b {github_pages_branch} {deploy_path}".format(**env))
local("git push origin {github_pages_branch}".format(**env))
| gpl-3.0 | -6,074,505,324,896,450,000 | 25.234043 | 76 | 0.657745 | false |
losonczylab/Zaremba_NatNeurosci_2017 | scripts/FigS12_all_conditions_enrichment_model.py | 1 | 3772 | """Figure S12 - All conditions enrichment model"""
FIG_FORMAT = 'svg'
import matplotlib as mpl
if FIG_FORMAT == 'svg':
mpl.use('agg')
elif FIG_FORMAT == 'pdf':
mpl.use('pdf')
elif FIG_FORMAT == 'interactive':
mpl.use('TkAgg')
import matplotlib.pyplot as plt
import cPickle as pickle
import seaborn.apionly as sns
from lab.misc import save_figure
import lab.plotting as plotting
import os
import sys
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.realpath(__file__)), '..', 'enrichment_model'))
import enrichment_model_plotting as emp
import Df16a_analysis as df
WT_color = df.WT_color
Df_color = df.Df_color
colors = (WT_color, Df_color)
save_dir = df.fig_save_dir
filename = 'FigS12_model_all_conditions.{}'.format(FIG_FORMAT)
def main():
fig = plt.figure(figsize=(8.5, 11))
gs1 = plt.GridSpec(
2, 2, left=0.1, right=0.7, top=0.9, bottom=0.5, hspace=0.4, wspace=0.4)
WT_enrich_ax = fig.add_subplot(gs1[0, 0])
Df_enrich_ax = fig.add_subplot(gs1[0, 1])
WT_final_dist_ax = fig.add_subplot(gs1[1, 0])
Df_final_dist_ax = fig.add_subplot(gs1[1, 1])
simulations_path_A = os.path.join(
df.data_path, 'enrichment_model',
'WT_Df_enrichment_model_simulation_A.pkl')
simulations_path_B = os.path.join(
df.data_path, 'enrichment_model',
'WT_Df_enrichment_model_simulation_B.pkl')
simulations_path_C = os.path.join(
df.data_path, 'enrichment_model',
'WT_Df_enrichment_model_simulation_C.pkl')
m_A = pickle.load(open(simulations_path_A))
m_B = pickle.load(open(simulations_path_B))
m_C = pickle.load(open(simulations_path_C))
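    # Simulations A, B and C are plotted as conditions I, II and III in the legends below.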
WT_colors = sns.light_palette(WT_color, 7)[2::2]
Df_colors = sns.light_palette(Df_color, 7)[2::2]
condition_labels = [
r'Condition $\mathrm{I}$',
r'Condition $\mathrm{II}$',
r'Condition $\mathrm{III}$']
WT_final_dists, Df_final_dists = [], []
for m, WT_c, Df_c in zip((m_A, m_B, m_C), WT_colors, Df_colors):
WT_enrich = emp.calc_enrichment(
m['WT_no_swap_pos'], m['WT_no_swap_masks'])
Df_enrich = emp.calc_enrichment(
m['Df_no_swap_pos'], m['Df_no_swap_masks'])
WT_final_dists.append(emp.calc_final_distributions(
m['WT_no_swap_pos'], m['WT_no_swap_masks']))
Df_final_dists.append(emp.calc_final_distributions(
m['Df_no_swap_pos'], m['Df_no_swap_masks']))
emp.plot_enrichment(
WT_enrich_ax, WT_enrich, WT_c, title='', rad=False)
emp.plot_enrichment(
Df_enrich_ax, Df_enrich, Df_c, title='', rad=False)
WT_enrich_ax.set_xlabel("Iteration ('session' #)")
Df_enrich_ax.set_xlabel("Iteration ('session' #)")
plotting.stackedText(
WT_enrich_ax, condition_labels, colors=WT_colors, loc=2, size=8)
plotting.stackedText(
Df_enrich_ax, condition_labels, colors=Df_colors, loc=2, size=8)
emp.plot_final_distributions(
WT_final_dist_ax, WT_final_dists,
WT_colors, labels=condition_labels, title='', rad=False)
emp.plot_final_distributions(
Df_final_dist_ax, Df_final_dists,
Df_colors, labels=condition_labels, title='', rad=False)
WT_final_dist_ax.set_xlabel('Distance from reward\n(fraction of belt)')
Df_final_dist_ax.set_xlabel('Distance from reward\n(fraction of belt)')
plotting.stackedText(
WT_final_dist_ax, condition_labels, colors=WT_colors, loc=2, size=8)
plotting.stackedText(
Df_final_dist_ax, condition_labels, colors=Df_colors, loc=2, size=8)
WT_final_dist_ax.set_yticks([0, 0.1, 0.2, 0.3])
Df_final_dist_ax.set_yticks([0, 0.1, 0.2, 0.3])
save_figure(fig, filename, save_dir=save_dir)
if __name__ == '__main__':
main()
| mit | -5,067,075,796,666,854,000 | 32.678571 | 79 | 0.6307 | false |
ikben/troposphere | examples/DynamoDB_Tables_OnDemand.py | 1 | 6548 | #!/usr/bin/python
from troposphere import (
Template,
If,
NoValue,
Equals,
Ref,
Output,
Parameter
)
from troposphere.dynamodb import (
KeySchema,
AttributeDefinition,
Projection,
ProvisionedThroughput,
Table,
GlobalSecondaryIndex
)
template = Template()
template.set_description("Create two dynamodb tables with "
"conditional on-demand billing. One "
"with global secondary index and one without")
on_demand = template.add_parameter(Parameter(
"BillOnDemand",
Type="String",
Default="true",
AllowedPattern="(false|true)"
))
readunits = template.add_parameter(Parameter(
"ReadCapacityUnits",
Description="Provisioned read throughput",
Type="Number",
Default="5",
MinValue="5",
MaxValue="10000",
ConstraintDescription="should be between 5 and 10000"
))
writeunits = template.add_parameter(Parameter(
"WriteCapacityUnits",
Description="Provisioned write throughput",
Type="Number",
Default="10",
MinValue="5",
MaxValue="10000",
ConstraintDescription="should be between 5 and 10000"
))
template.add_condition("OnDemand", Equals(Ref(on_demand), "true"))
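# When the OnDemand condition is true, both tables use PAY_PER_REQUEST billing and
# their ProvisionedThroughput blocks are dropped via NoValue.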
hashkeyname = template.add_parameter(Parameter(
"HashKeyElementName",
Description="HashType PrimaryKey Name",
Type="String",
AllowedPattern="[a-zA-Z0-9]*",
MinLength="1",
MaxLength="2048",
    ConstraintDescription="must contain only alphanumeric characters"
))
hashkeytype = template.add_parameter(Parameter(
"HashKeyElementType",
Description="HashType PrimaryKey Type",
Type="String",
Default="S",
AllowedPattern="[S|N]",
MinLength="1",
MaxLength="1",
ConstraintDescription="must be either S or N"
))
# N.B. If you remove the provisioning section this works for
# LocalSecondaryIndexes as well.
tableIndexName = template.add_parameter(Parameter(
"TableIndexName",
Description="Table: Primary Key Field",
Type="String",
Default="id",
AllowedPattern="[a-zA-Z0-9]*",
MinLength="1",
MaxLength="2048",
ConstraintDescription="must contain only alphanumberic characters"
))
tableIndexDataType = template.add_parameter(Parameter(
"TableIndexDataType",
Description=" Table: Primary Key Data Type",
Type="String",
Default="S",
AllowedPattern="[S|N|B]",
MinLength="1",
MaxLength="1",
ConstraintDescription="S for string data, N for numeric data, or B for "
"binary data"
))
secondaryIndexHashName = template.add_parameter(Parameter(
"SecondaryIndexHashName",
Description="Secondary Index: Primary Key Field",
Type="String",
Default="tokenType",
AllowedPattern="[a-zA-Z0-9]*",
MinLength="1",
MaxLength="2048",
ConstraintDescription="must contain only alphanumberic characters"
))
secondaryIndexHashDataType = template.add_parameter(Parameter(
"SecondaryIndexHashDataType",
Description="Secondary Index: Primary Key Data Type",
Type="String",
Default="S",
AllowedPattern="[S|N|B]",
MinLength="1",
MaxLength="1",
ConstraintDescription="S for string data, N for numeric data, or B for "
"binary data"
))
secondaryIndexRangeName = template.add_parameter(Parameter(
"refreshSecondaryIndexRangeName",
Description="Secondary Index: Range Key Field",
Type="String",
Default="tokenUpdatedTime",
AllowedPattern="[a-zA-Z0-9]*",
MinLength="1",
MaxLength="2048",
ConstraintDescription="must contain only alphanumberic characters"
))
secondaryIndexRangeDataType = template.add_parameter(Parameter(
"SecondaryIndexRangeDataType",
Description="Secondary Index: Range Key Data Type",
Type="String",
Default="S",
AllowedPattern="[S|N|B]",
MinLength="1",
MaxLength="1",
ConstraintDescription="S for string data, N for numeric data, or B for "
"binary data"
))
myDynamoDB = template.add_resource(Table(
"myDynamoDBTable",
AttributeDefinitions=[
AttributeDefinition(
AttributeName=Ref(hashkeyname),
AttributeType=Ref(hashkeytype)
),
],
BillingMode=If("OnDemand", "PAY_PER_REQUEST", "PROVISIONED"),
ProvisionedThroughput=If("OnDemand", NoValue, ProvisionedThroughput(
ReadCapacityUnits=Ref(readunits),
WriteCapacityUnits=Ref(writeunits)
)),
KeySchema=[
KeySchema(
AttributeName=Ref(hashkeyname),
KeyType="HASH"
)
]
))
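# Optional sketch (an assumption, not part of the original template): the plain
# table's name could be exported the same way the GSI table is below, e.g.
#
#     template.add_output(Output(
#         "PlainTable",
#         Value=Ref(myDynamoDB),
#         Description="Table without a Global Secondary Index",
#     ))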
GSITable = template.add_resource(Table(
"GSITable",
AttributeDefinitions=[
AttributeDefinition(
AttributeName=Ref(tableIndexName),
AttributeType=Ref(tableIndexDataType)
),
AttributeDefinition(
AttributeName=Ref(secondaryIndexHashName),
AttributeType=Ref(secondaryIndexHashDataType)
),
AttributeDefinition(
AttributeName=Ref(secondaryIndexRangeName),
AttributeType=Ref(secondaryIndexRangeDataType)
)
],
BillingMode=If("OnDemand", "PAY_PER_REQUEST", "PROVISIONED"),
KeySchema=[
KeySchema(
AttributeName=Ref(tableIndexName),
KeyType="HASH"
)
],
ProvisionedThroughput=If("OnDemand", NoValue, ProvisionedThroughput(
ReadCapacityUnits=Ref(readunits),
WriteCapacityUnits=Ref(writeunits)
)),
GlobalSecondaryIndexes=[
GlobalSecondaryIndex(
IndexName="SecondaryIndex",
KeySchema=[
KeySchema(
AttributeName=Ref(secondaryIndexHashName),
KeyType="HASH"
),
KeySchema(
AttributeName=Ref(secondaryIndexRangeName),
KeyType="RANGE"
)
],
Projection=Projection(ProjectionType="ALL"),
ProvisionedThroughput=If("OnDemand", NoValue,
ProvisionedThroughput(
ReadCapacityUnits=Ref(readunits),
WriteCapacityUnits=Ref(writeunits)
)
)
)
]
))
template.add_output(Output(
"GSITable",
Value=Ref(GSITable),
Description="Table with a Global Secondary Index",
))
print(template.to_json())
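# Deployment sketch (assumption; the file and stack names are illustrative and not
# part of the original example): the rendered JSON can be written to disk, e.g.
#
#     with open("dynamodb_tables_on_demand.json", "w") as f:
#         f.write(template.to_json())
#
# and then deployed with the AWS CLI:
#
#     aws cloudformation deploy \
#         --template-file dynamodb_tables_on_demand.json \
#         --stack-name dynamodb-on-demand-example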
| bsd-2-clause | 3,515,053,229,599,871,500 | 27.593886 | 76 | 0.622022 | false |
RoozbehFarhoodi/McNeuron | McNeuron/Neuron.py | 1 | 70044 | import sys
import numpy as np
from numpy import mean, cov, dot, transpose
from numpy import linalg as LA
import math
from scipy.sparse import csr_matrix
from __builtin__ import str
from copy import deepcopy
import matplotlib.pyplot as plt
from numpy.linalg import inv
#np.random.seed(0)
class Neuron(object):
"""Neuron Class
    This class represents the neuron by a list of `Node`s. Borrowed from the swc format, each node indicates a point on the neuron. Each node has a parent and children (at most two children), and the set of all nodes with their parents makes a tree structure: a connected graph without loops. The soma is represented by a few nodes; one of them is called the root node, and all the nodes in the neuron (including the other soma nodes) descend from it. Notice that all nodes from index 0 to index `n_soma` in the `nodes_list` are soma.
    This class contains the attributes to calculate different features of the neuron. The input file can be an swc file or the list of nodes.
Parameters
-----------
file_format : string, optional (default=None)
input_file : string or list
        if it is a string, it will read the swc file from that address,
        if it is a list, the elements of the list should be objects from the Node class
        and the corresponding tree is created.
    all indexing for non-soma nodes (or the nodes that are very close to the soma) comes from self.nodes_list
Attributes
----------
n_soma : int
The number of the nodes that represents the soma.
n_node : int
The number of all the nodes in the neuron.
nodes_list : list of Node
The list of all the nodes in the neuron.
root : Node
The represented node of root.
location : array of shape = [3, n_node]
        Three dimensional location of the nodes.
parent_index : array of shape = [n_node]
The index of the parent of each node in the nodes_list.
child_index : array of shape = [2, n_node]
        first row: The index of the first child of the node (the ordering of the nodes is arbitrary).
        second row: nan if the node is order one, and the index of the second child of the node if it's a branching node.
    branch_order : array of shape = [n_node]
        The number of children of the nodes. It can be an integer number for the root (first element) and only 0, 1 or 2 for other nodes.
    ext_red_list : array of shape = [3, n_node]
        first row: end points and order one nodes (for extension)
        second row: end points (for removing)
        third row: end points whose parents are order one nodes (for extension)
connection : array of shape = [n_node, n_node]
        The matrix of connectivity of the nodes. The element (i,j) of the matrix is not np.nan if node i is a descendant of node j. The value in this array is the distance of j to its parent. It's useful for the calculation of the neural distance over the Euclidean distance.
    frustum : array of shape = [n_node] !!!NOT IMPLEMENTED!!!
        The value of the frustum from the node toward its parent.
    branch_order : array of shape = [n_node]
        The number of children for each of the nodes. Notice that for the nodes other than the root it should be 0, 1 or 2. For the root it can be any integer number.
    rall_ratio : array of shape = [n_node] !!!NOT IMPLEMENTED!!!
        It's not nan only at branching nodes, where its value is the Rall ratio.
    distance_from_root : array of shape = [n_node]
        Euclidean distance toward the root.
    distance_from_parent : array of shape = [n_node]
        Euclidean distance toward the parent of the node.
    slope : array of shape = [n_node]
        ratio of the Euclidean distance toward the parent of the node over their diameter difference.
branch_angle : array of shape [3, n_nodes]
        it shows the angles at the branching nodes: the first row is the angle of the two outward segments at the branching point; the second and third rows are the angles between the two outward segments and the previous segment at the branching, in arbitrary order (nan at other nodes).
    angle_global : array of shape = [n_node]
        The angle between the line linking the node to the root and the line linking it to its parent.
    local_angle : array of shape = [n_node]
        The angle between the line linking the node to its parent and its child, and nan otherwise.
References
----------
.. [1] R.Farhoodi, K.P.Kording, "Generating Neuron Morphologies using naive Bayes MCMC"
"""
def __init__(self, file_format = None, input_file = None):
"""
Default constructor. There are three ways of representing the neuron.
        In 'swc' the swc file is given and the output is a Neuron class with all of its attributes.
        In 'swc without attributes' the swc file is given and the output is a Neuron class without its attributes. It's useful for the case that only the nodes are important, e.g. fast visualization of the neuron.
In 'only list of nodes' the list of all the nodes of the neuron is given.
"""
if(file_format == 'swc'): # swc is given
self.read_swc(input_file)
self.ratio_red_to_ext = 1.
self.n_node = len(self.nodes_list)
#self.set_n_soma_n_node()
self.set_parent()
self.parent_index = self.parent_index.astype(int)
#self.set_loc_diam()
self.fit()
if(file_format == 'swc without attributes'):
self.read_swc(input_file)
self.set_parent()
self.parent_index = self.parent_index.astype(int)
if(file_format == 'only list of nodes'):
self.nodes_list = input_file
self.root = self.find_root(self.nodes_list[0])
self.set_n_soma_n_node()
self.set_parent()
self.parent_index = self.parent_index.astype(int)
self.set_loc_diam()
self.fit()
if(file_format == 'Matrix of swc'):
# the n*7 array is given.
self.read_swc_matrix(input_file)
self.set_parent()
self.parent_index = self.parent_index.astype(int)
self.set_branch_order()
#self.set_sholl()
def __str__(self):
return "Neuron found with" + str(self.n_node) + " number of nodes and" + str(self.n_soma) + "number of node representing soma."
def fit(self):
"""
dependency:
self.nodes_list
self.n_soma
self.location
self.diameter
self.parent_index
self.child_index
self.root
self.n_nodes
output attributes are:
self.branch_order
self.connection
self.ext_red_list
self.rall_ratio
self.distance_from_root
self.distance_from_parent
self.branch_angle
self.global_angle
self.local_angle
self.frustum
"""
self.set_branch_order()
self.set_distance_from_root()
self.set_distance_from_parent()
self.set_connection2()
#self.set_rall_ratio()
self.set_branch_angle()
self.set_global_angle()
self.set_local_angle()
#self.sholl_r = np.array([0]) # the position of the jumps for sholl analysis
#self.sholl_n = np.array([0]) # the value at the jumping (the same size as self.sholl_x)
#self.set_sholl()
self.ratio_red_to_ext = 1.
self.set_ext_red_list()
self.set_features()
def set_n_soma_n_node(self):
self.n_soma = 0
for n in self.nodes_list:
            if n.type == 'soma':
self.n_soma += 1
if(self.n_soma == 0): # for no soma representation
self.n_soma = 1
self.n_node = len(self.nodes_list)
def set_features(self):
self.features = {}
self.features['Nnodes'] = np.array([self.n_node - self.n_soma])
#self.features['asymetric']
(num_branches,) = np.where(self.branch_order[self.n_soma:] == 2)
self.features['Nbranch'] = np.array([len(num_branches)])
self.features['initial_segments'] = np.array([len(self.root.children)])
self.features['global_angle'] = np.pi - self.global_angle[self.n_soma:]
#self.features['diameter'] = self.diameter[self.n_soma:]
self.features['distance_from_parent'] = self.distance_from_parent[self.n_soma:]
self.features['distance_from_root'] = self.distance_from_root[self.n_soma:]
self.features['ratio_euclidian_neuronal'] = np.nansum(self.connection[self.n_soma:,self.n_soma:],axis = 1)/self.distance_from_root[self.n_soma:]
x = np.abs(self.branch_angle[0,self.n_soma:])
self.features['branch_angle'] = x[~np.isnan(x)]
x = self.local_angle[self.n_soma:]
self.features['local_angle'] = x[~np.isnan(x)]
if(len(self.features['local_angle'])==0):
self.features['local_angle'] = np.array([0])
if(len(self.features['branch_angle']) == 0):
self.features['branch_angle'] = np.array([0])
self.features['discrepancy_space'] = np.array([self.discrepancy(10.,10.,10.)])
#self.features['repellent'] = np.array([self.repellent(10.,10.,10.)])
self.features['curvature'] = self.set_curvature()
important_node = self.important_nodes()
parent_important = self.parent_index_for_node_subset(important_node)
(neural, euclidan) = self.set_neural_euclid_branch(important_node, parent_important)
self.features['neural_important'] = neural
self.features['euclidian_important'] = euclidan
self.features['ratio_neural_euclidian_important'] = neural/euclidan
self.features['branch_angle_segment'] = self.set_branch_angle_segment(important_node, parent_important)
def important_nodes(self):
(branch_index,) = np.where(self.branch_order[self.n_soma:]==2)
(end_nodes,) = np.where(self.branch_order[self.n_soma:]==0)
important_node = np.append(branch_index,end_nodes)
if(len(important_node)!=0):
important_node = self.n_soma + important_node
return important_node
def set_neural_euclid_branch(self, important_node, thier_parents):
neural = np.array([])
euclidan = np.array([])
for i in range(important_node.shape[0]):
neural_length = self.distance(important_node[i],thier_parents[i])
euclidan_length = LA.norm(self.location[:,important_node[i]] - self.location[:,thier_parents[i]],2)
neural = np.append(neural, neural_length)
euclidan = np.append(euclidan, euclidan_length)
return neural, euclidan
def discrepancy(self, x_mesh, y_mesh, z_mesh):
X = self.normlize(self.location[0,:],x_mesh)
Y = self.normlize(self.location[1,:],y_mesh)
Z = self.normlize(self.location[2,:],z_mesh)
L = X + x_mesh*Y + x_mesh*y_mesh*Z
a, N = np.unique(L, return_counts=True)
return len(a) # np.flipud(np.sort(N))
def repellent(self, x_mesh, y_mesh, z_mesh):
X = self.normlize(self.location[0,:],x_mesh)
Y = self.normlize(self.location[1,:],y_mesh)
Z = self.normlize(self.location[2,:],z_mesh)
L = X + x_mesh*Y + x_mesh*y_mesh*Z
a, N = np.unique(L, return_counts=True)
return len(a) # np.flipud(np.sort(N))
def normlize(self, vector, mesh):
m = min(vector)
M = max(vector)
if(M==m):
a = 0
else:
a = np.floor(mesh*((vector - m)/(M-m)))
return a
def set_branch_order(self):
        # terminal = 0, passing (none of them) = 1, branch = 2
"""
dependency:
nodes_list
"""
self.branch_order = np.array([])
for n in self.nodes_list:
self.branch_order = np.append(self.branch_order, len(n.children))
def set_ratio_red_to_ext(self,c):
self.ratio_red_to_ext = c
def set_ext_red_list(self):
"""
        In the extension-reduction perturbation, one of the nodes will be removed or one node will be added. In the first case, the node can only be
        an end point, but in the second case the new node might be added to any node that has one or zero children.
dependency:
self.nodes_list
self.branch_order
self.n_soma
self.ratio_red_to_ext
ext_red_list:
first row: end points and order one nodes (for extension)
second row: end points (for removing)
            third row: end points whose parents are order one nodes (for extension)
Remarks:
1) The list is zero for the soma nodes.
2) The value for first and second is binary but the third row is self.ratio_red_to_ext
"""
(I,) = np.where(self.branch_order[self.n_soma:] == 0)
I = I + self.n_soma
self.ext_red_list = np.zeros((3, self.n_node))
self.ext_red_list[0,I] = 1
self.ext_red_list[0,np.where(self.branch_order == 1)] = 1
self.ext_red_list[1,I] = self.ratio_red_to_ext
J = np.array([])
for i in I:
if(len((self.nodes_list[i].parent).children) == 1):
J = np.append(J,i)
J = np.array(J, dtype = int)
self.ext_red_list[2,J] = 1
self.ext_red_list.astype(int)
self.ext_red_list[:,0:self.n_soma] = 0
def set_distance_from_root(self):
"""
dependency:
self.location
"""
self.distance_from_root = np.sqrt(sum(self.location ** 2))
def set_distance_from_parent(self):
"""
given:
self.location
self.parent_index
"""
a = (self.location - self.location[:,self.parent_index.astype(int)]) ** 2
self.distance_from_parent = np.sqrt(sum(a))
def set_branch_angle_segment(self, important_node, parent_important):
I = np.array([])
for i in important_node:
(J,) = np.where(parent_important == i)
if(len(J) == 2):
vec0 = np.expand_dims(self.location[:,important_node[J[0]]] - self.location[:,i], axis = 1)
vec1 = np.expand_dims(self.location[:,important_node[J[1]]] - self.location[:,i], axis = 1)
I = np.append(I,self.angle_vec_matrix(vec0,vec1))
return I
def set_branch_angle(self):
"""
An array with size [3, n_nodes] and shows the angles at the branching nodes:
First row is the angle of two outward segments at the branching point
        Second and third rows are the angle between two outward segments and the previous segment at the branching in arbitrary order (nan at other nodes).
dependency:
self.nodes_list
self.branch_order
self.location
self.parent_index
self.child_index
self.n_soma
"""
self.branch_angle = np.nan*np.zeros([3,self.n_node])
(I,) = np.where(self.branch_order == 2)
I = I[I>self.n_soma]
vec0 = self.location[:,self.child_index[0,I].astype(int)] - self.location[:,I]
vec1 = self.location[:,self.child_index[1,I].astype(int)] - self.location[:,I]
vec2 = self.location[:,self.parent_index[I].astype(int)] - self.location[:,I]
self.branch_angle[0,I] = self.angle_vec_matrix(vec0,vec1)
self.branch_angle[1,I] = self.angle_vec_matrix(vec0,vec2)
self.branch_angle[2,I] = self.angle_vec_matrix(vec2,vec1)
def set_global_angle(self):
"""
dependency:
sefl.location
self.parent_index
self.n_soma
"""
dir = self.location - self.location[:,self.parent_index.astype(int)]
self.global_angle = self.angle_vec_matrix(self.location, dir)
def set_local_angle(self):
"""
dependency:
self.location
self.n_soma
self.branch_order
self.parent_index
"""
self.local_angle = np.nan*np.ones(self.n_node)
(I,) = np.where(self.branch_order[self.n_soma:] == 1)
I = I + self.n_soma
dir1 = self.location[:,I] - self.location[:,self.parent_index[I].astype(int)]
dir2 = self.location[:,I] - self.location[:,self.child_index[0,I].astype(int)]
self.local_angle[I] = self.angle_vec_matrix(dir1, dir2)
def set_frustum(self):
"""
dependency:
self.distance_from_parent
self.n_soma
self.diameter
self.parent_index
"""
self.frustum = np.array([0])
l = self.distance_from_parent[self.n_soma:]
r = self.diameter[self.n_soma:]
R = self.diameter[self.parent_index][self.n_soma:]
f = (np.pi/3.0)*l*(r ** 2 + R ** 2 + r * R)
self.frustum = np.append(np.zeros(self.n_soma), f)
def set_curvature(self):
par = self.parent_index
papar = par[par]
papapar = par[par[par]]
dir1 = self.location[:,par] - self.location
dir2 = self.location[:,papar] - self.location[:,par]
dir3 = self.location[:,papapar] - self.location[:,papar]
cros1 = np.cross(np.transpose(dir1), np.transpose(dir2))
cros2 = np.cross(np.transpose(dir2), np.transpose(dir3))
I = self.angle_vec_matrix(np.transpose(cros1), np.transpose(cros2))
return I[self.n_soma:]
def set_rall_ratio(self):
"""
dependency:
self.diameter
self.child_index
self.n_soma
self.n_node
"""
self.rall_ratio = np.nan*np.ones(self.n_node)
(I,) = np.where(self.branch_order[self.n_soma:] == 2)
ch1 = np.power(self.diameter[self.child_index[0,I]],2./3.)
ch2 = np.power(self.diameter[self.child_index[1,I]],2./3.)
n = np.power(self.diameter[I],2./3.)
self.rall_ratio[I] = (ch1+ch2)/n
def set_values_ite(self):
"""
set iteratively the following attributes:
parent_index
child_index
location
diameter
rall_ratio
distance_from_root
distance_from_parent
slope
branch_angle
branch_order
"""
self.parent_index = np.zeros(self.n_soma)
self.child_index = np.nan * np.ones([2,self.n_soma])
for n in self.nodes_list[1:]:
self.location = np.append(self.location, n.xyz.reshape([3,1]), axis = 1)
self.diameter = np.append(self.diameter, n.r)
for n in self.nodes_list[1:]:
#self.frustum = np.append(self.frustum, self.calculate_frustum(n))
#self.rall_ratio = np.append(self.rall_ratio, self.calculate_rall(n))
self.distance_from_root = np.append(self.distance_from_root, self.calculate_distance_from_root(n))
self.distance_from_parent = np.append(self.distance_from_parent, self.calculate_distance_from_parent(n))
#self.slope = np.append(self.slope, self.calculate_slope(n))
ang, ang1, ang2 = self.calculate_branch_angle(n)
an = np.zeros([3,1])
an[0,0] = ang
an[1,0] = ang1
an[2,0] = ang2
if(self.branch_angle.shape[1] == 0):
self.branch_angle = an
else:
self.branch_angle = np.append(self.branch_angle, an, axis = 1)
glob_ang, local_ang = self.calculate_node_angles(n)
self.global_angle = np.append(self.global_angle, glob_ang)
self.local_angle = np.append(self.local_angle, local_ang)
#self.neural_distance_from_soma = np.append(self.neural_distance_from_soma, self.calculate_neural_distance_from_soma(n))
for n in self.nodes_list[self.n_soma:]:
self.parent_index = np.append(self.parent_index, self.get_index_for_no_soma_node(n.parent))
if(self.branch_order[self.get_index_for_no_soma_node(n)]==2):
a = np.array([self.get_index_for_no_soma_node(n.children[0]),self.get_index_for_no_soma_node(n.children[1])]).reshape(2,1)
self.child_index = np.append(self.child_index, a, axis = 1)
if(self.branch_order[self.get_index_for_no_soma_node(n)]==1):
a = np.array([self.get_index_for_no_soma_node(n.children[0]),np.nan]).reshape(2,1)
self.child_index = np.append(self.child_index, a, axis = 1)
if(self.branch_order[self.get_index_for_no_soma_node(n)]==0):
a = np.array([np.nan,np.nan]).reshape(2,1)
self.child_index = np.append(self.child_index, a, axis = 1)
def set_parent(self):
self.parent_index = np.zeros(self.n_soma)
self.child_index = np.zeros([2,self.n_node])
for n in self.nodes_list[self.n_soma:]:
par = self.get_index_for_no_soma_node(n.parent)
node = self.get_index_for_no_soma_node(n)
self.parent_index = np.append(self.parent_index, par)
if self.child_index[0,par] != 0:
self.child_index[1,par] = node
else:
self.child_index[0,par] = node
self.child_index[self.child_index == 0] = np.nan
self.child_index[:,0:self.n_soma] = np.nan
#self.parent_index.astype(int)
def set_loc_diam(self):
self.location = np.zeros([3,self.n_node])
self.diameter = np.zeros(self.n_node)
for n in range(self.n_node):
self.location[:,n] = self.nodes_list[n].xyz
self.diameter[n] = self.nodes_list[n].r
def set_connection2(self):
"""
dependency:
self.nodes_list
self.n_soma
self.n_node
self.parent_index
self.distance_from_parent
"""
connection = np.zeros([self.n_node,self.n_node]) # the connectivity matrix
connection[np.arange(self.n_node), self.parent_index.astype(int)] = 1
connection[0,0] = 0
connection = inv(np.eye(self.n_node) - connection)
connection[connection != 1] = np.nan
for i in range(self.n_node):
(J,) = np.where(~np.isnan(connection[:,i]))
connection[J,i] = self.distance_from_parent[i]
connection[:,0] = 1
connection[np.arange(self.n_soma),np.arange(self.n_soma)] = 1
self.connection = connection
#return connection
def set_connection(self):
"""
connection is an array with size [n_node, n_node]. The element (i,j) is not np.nan if
node i is a decendent of node j. The value at this array is the distance of j to its parent.
dependency:
self.nodes_list
self.n_soma
self.parent_index
self.distance_from_parent
"""
self.parent_index = np.array(self.parent_index, dtype = int)
L = self.n_node - self.n_soma
C = csr_matrix((np.ones(L),(range(self.n_soma,self.n_node), self.parent_index[self.n_soma:])), shape = (self.n_node,self.n_node))
self.connection = np.zeros([self.n_node,self.n_node]) # the connectivity matrix
new = 0
i = 0
old = C.sum()
while(new != old):
self.connection = C.dot(csr_matrix(self.connection)) + C
old = new
new = self.connection.sum()
self.connection = self.connection.toarray()
self.connection[range(1,self.n_node),range(1,self.n_node)] = 1
self.connection[:,:self.n_soma] = 0
# fill the matrix with the distance
for i in range(self.n_node):
self.connection[self.connection[:,i] != 0,i] = self.distance_from_parent[i]
self.connection[self.connection == 0] = np.nan
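    # Sketch of the resulting structure (hypothetical example, not original code):
    # for a chain of nodes 0 -> 1 -> 2 -> 3, connection[3, 1] holds the distance of
    # node 1 from its parent (because node 3 descends from node 1), while
    # connection[1, 3] stays np.nan. Summing a row with np.nansum therefore gives
    # the neural path length from the soma to that node, which is what
    # set_features uses for features['ratio_euclidian_neuronal'].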
def set_sholl(self):
self.sholl_r = np.array([])
for n in self.nodes_list:
dis = LA.norm(self.xyz(n) - self.root.xyz,2)
self.sholl_r = np.append(self.sholl_r, dis)
self.sholl_r = np.sort(np.array(self.sholl_r))
self.sholl_n = np.zeros(self.sholl_r.shape)
for n in self.nodes_list:
if(n.parent != None):
par = n.parent
dis_par = LA.norm(self.xyz(par) - self.root.xyz,2)
                dis_n = LA.norm(self.xyz(n) - self.root.xyz,2)
M = max(dis_par, dis_n)
m = min(dis_par, dis_n)
I = np.logical_and(self.sholl_r>=m, self.sholl_r<=M)
self.sholl_n[I] = self.sholl_n[I] + 1
def xyz(self, node):
return self.location[:,self.get_index_for_no_soma_node(node)]
def _r(self, node):
return self.diameter[self.get_index_for_no_soma_node(node)]
def parent_index_for_node_subset(self, subset):
"""
inputs
------
index of subset of the nodes without root node
output
------
Index of grand parent inside of the subset for each member of subset
"""
if((subset==0).sum() == 0):
subset = np.append(0,subset)
n = subset.shape[0]
self.connection[:,0] = 1.
self.connection[np.arange(self.n_soma),np.arange(self.n_soma)] = 1.
A = self.connection[np.ix_(subset,subset)]
A[np.isnan(A)] = 0
A[A!=0] = 1.
B = np.eye(subset.shape[0]) - inv(A)
return subset[np.where(B==1)[1]]
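    # Usage sketch (an assumption, not original code):
    #     important = neuron.important_nodes()
    #     parents = neuron.parent_index_for_node_subset(important)
    # parents[i] is then the index (in the full node list) of the closest ancestor
    # of important[i] that itself belongs to the subset, so branch and end points
    # can be linked into a reduced tree of "important" nodes only.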
def distance(self, index1, index2):
"""
Neural distance between two nodes in the neuron.
inputs
------
index1, index2 : the indecies of the nodes.
output
------
the neural distance between the node.
"""
return min(self.distance_two_node_up_down(index1,index2),self.distance_two_node_up_down(index2,index1))
def distance_two_node_up_down(self, Upindex, Downindex):
(up,) = np.where(~np.isnan(self.connection[Downindex,:]))
(down,) = np.where(~np.isnan(self.connection[:,Upindex]))
I = np.intersect1d(up,down)
if(I.shape[0] != 0):
return sum(self.distance_from_parent[I]) - self.distance_from_parent[Upindex]
else:
return np.inf
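    # Usage sketch (hypothetical node indices, not original code):
    #     d = neuron.distance(12, 40)
    # The helper above is tried in both orientations; if neither node lies on the
    # root-ward path of the other, both directions return np.inf and so does
    # distance().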
def calculate_overall_matrix(self, node):
j = self.get_index_for_no_soma_node(node)
k = self.get_index_for_no_soma_node(node.parent)
(J,) = np.where(~ np.isnan(self.connection[:,j]))
dis = LA.norm(self.location[:,k] - self.location[:,j],2)
self.connection[J,j] = dis
def calculate_branch_order(self,node):
"""
        terminal = 0, passing (none of them) = 1, branch = 2
"""
return len(node.children)
def calculate_frustum(self,node):
"""
the Volume of the frustum ( the node with its parent) at each location. (nan for the nodes of soma)
"""
r = self._r(node)
r_par = self._r(node.parent)
dis = LA.norm(self.xyz(node) - self.xyz(node.parent) ,2)
f = dis*(np.pi/3.0)*(r*r + r*r_par + r_par*r_par)
return f
def calculate_rall(self,node):
if(len(node.children) == 2):
n1, n2 = node.children
r1 = self._r(n1)
r2 = self._r(n2)
r = self._r(node)
rall = (np.power(r1,2.0/3.0)+(np.power(r2,2.0/3.0)))/np.power(r,2.0/3.0)
else:
rall = np.nan
return rall
def calculate_distance_from_root(self,node):
return LA.norm(self.xyz(node) - self.root.xyz,2)
def calculate_distance_from_parent(self,node):
return LA.norm(self.xyz(node) - self.xyz(node.parent),2)
def calculate_slope(self,node):
# the ratio of: delta(pos)/delta(radius)
dis = LA.norm(self.xyz(node) - self.xyz(node.parent),2)
rad = node.r - node.parent.r
if(dis == 0):
val = rad
else:
val = rad/dis
return val
def calculate_branch_angle(self,node):
        # the mean of the angle between two outward segments and the previous segment at the branching (nan at other nodes)
if(len(node.children) == 2):
n1, n2 = node.children
nodexyz = self.xyz(node)
node_parxyz = self.xyz(node.parent)
node_chixyz1 = self.xyz(n1)
node_chixyz2 = self.xyz(n2)
vec = node_parxyz - nodexyz
vec1 = node_chixyz1 - nodexyz
vec2 = node_chixyz2 - nodexyz
ang = self.angle_vec(vec1,vec2) # the angle of two outward segments at the branching point (nan for non-branchings)
ang1 = self.angle_vec(vec1,vec)
ang2 = self.angle_vec(vec2,vec)
else:
ang = np.nan
ang1 = np.nan
ang2 = np.nan
return ang, ang1, ang2
def calculate_node_angles(self,node):
par = node.parent
nodexyz = self.xyz(node)
node_parxyz = self.xyz(node.parent)
vec1 = node_parxyz - nodexyz
vec2 = self.root.xyz - nodexyz
glob_ang = self.angle_vec(vec1,vec2)
if(node.children != None):
if(len(node.children) ==1):
[child] = node.children
vec3 = self.xyz(child) - nodexyz
local_ang = self.angle_vec(vec1,vec3)
else:
local_ang = np.nan
else:
local_ang = np.nan
return glob_ang, local_ang
    # Auxiliary functions
def angle_vec_matrix(self,matrix1,matrix2):
"""
Takes two matrix 3*n of matrix1 and matrix2 and gives back
the angles for each corresponding n vectors.
Note: if the norm of one of the vectors is zeros the angle is np.pi
"""
ang = np.zeros(matrix1.shape[1])
norm1 = LA.norm(matrix1, axis = 0)
norm2 = LA.norm(matrix2, axis = 0)
domin = norm1*norm2
(J,) = np.where(domin != 0)
ang[J] = np.arccos(np.maximum(np.minimum(sum(matrix1[:,J]*matrix2[:,J])/domin[J],1),-1))
return ang
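    # Example sketch (an assumption, not original code), with two 3 x 1 columns:
    #     v1 = np.array([[1.], [0.], [0.]])
    #     v2 = np.array([[0.], [1.], [0.]])
    #     neuron.angle_vec_matrix(v1, v2)   # -> array([1.5707...]), i.e. pi/2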
def angle_vec(self,vec1,vec2):
val = sum(vec1*vec2)/(LA.norm(vec1,2)*LA.norm(vec2,2))
if(LA.norm(vec1,2)==0 or LA.norm(vec2,2) == 0):
val = -1
return math.acos(max(min(val,1),-1))
def choose_random_node_index(self):
n = np.floor((self.n_node-self.n_soma)*np.random.random_sample()).astype(int)
return n + self.n_soma
def p_ext_red_whole(self):
"""
        This function gives back the probability of choosing one of the nodes to add a node and
        extend it.
"""
return self.ext_red_list[0:2,:].sum()+1 # 1 added because the root may extend
def p_ext_red_end_point(self):
"""
        This function gives back the probability of choosing one of the nodes to add a node and
        extend it.
"""
return self.ext_red_list[1:3,:].sum()
def get_index_for_no_soma_node(self,node):
return self.nodes_list.index(node)
def _list_for_local_update(self,node):
"""
Return the index of node, its parent and any children it may have.
The node should be a no_soma node
"""
update_list = np.array([]) # index of all nodes for update
update_list = np.append(update_list, self.get_index_for_no_soma_node(node))
if(node.parent.type != 'soma'):
update_list = np.append(update_list, self.get_index_for_no_soma_node(node.parent)) # if the node doesnt have a parent in no_soma list, i.e. its parent is a soma, get_index would return nothing
if(node.children != None):
for n in node.children:
update_list = np.append(update_list, self.get_index_for_no_soma_node(n))
return update_list.astype(int)
def _update_attribute(self,update_list):
for ind in update_list:
#self.frustum[ind] = self.calculate_frustum(self.nodes_list[ind])
#self.rall_ratio[ind] = self.calculate_rall(self.nodes_list[ind])
self.distance_from_root[ind] = self.calculate_distance_from_root(self.nodes_list[ind])
self.distance_from_parent[ind] = self.calculate_distance_from_parent(self.nodes_list[ind])
#self.slope[ind] = self.calculate_slope(self.nodes_list[ind])
self.branch_order[ind] = self.calculate_branch_order(self.nodes_list[ind])
ang, ang1, ang2 = self.calculate_branch_angle(self.nodes_list[ind])
self.branch_angle[0, ind] = ang
self.branch_angle[1, ind] = ang1
self.branch_angle[2, ind] = ang2
ang1, ang2 = self.calculate_node_angles(self.nodes_list[ind])
self.global_angle[ind] = ang1
self.local_angle[ind] = ang2
self.calculate_overall_matrix(self.nodes_list[ind])
#self.sholl_r = np.array([]) # the position of the jumps for sholl analysis
#self.sholl_n = np.array([]) # the value at the jumping (the same size as self.sholl_x)
def change_location(self,index,displace):
"""
Change the location of one of the node in the neuron updates the attribute accordingly.
Parameters:
___________
index: the index of node in no_soma_list to change its diameter
displace: the location of new node is the xyz of the current locatin + displace
"""
# First change the location of the node by displace
node = self.nodes_list[index]
self.location[:,index] += displace
self._update_attribute(self._list_for_local_update(node))
self.set_features()
def change_location_toward_end_nodes(self,index,displace):
(I,) = np.where(~np.isnan(self.connection[:,index]))
self.location[0,I] += displace[0]
self.location[1,I] += displace[1]
self.location[2,I] += displace[2]
self.set_distance_from_root()
self.set_distance_from_parent()
self.connection[np.ix_(I,[index])] = self.distance_from_parent[index]
self.set_branch_angle()
self.set_global_angle()
self.set_local_angle()
self.set_features()
def change_location_important(self, index, displace):
(branch_index,) = np.where(self.branch_order[self.n_soma:]==2)
(end_nodes,) = np.where(self.branch_order[self.n_soma:]==0)
branch_index += self.n_soma
end_nodes += self.n_soma
I = np.append(branch_index, end_nodes)
parents = self.parent_index_for_node_subset(I)
(ind,) = np.where(I == index)
origin = deepcopy(self.location[:,index])
# correct the segment to the parent
par = parents[ind][0]
(up,) = np.where(~np.isnan(self.connection[index,:]))
(down,) = np.where(~np.isnan(self.connection[:,par]))
J = np.intersect1d(up,down)
A = self.location[:,J]
loc = self.location[:,par]
A[0,:] = A[0,:] - loc[0]
A[1,:] = A[1,:] - loc[1]
A[2,:] = A[2,:] - loc[2]
r1 = origin - loc
r2 = r1 + displace
M = self.scalar_rotation_matrix_to_map_two_vector(r1, r2)
A = np.dot(M,A)
A[0,:] = A[0,:] + loc[0]
A[1,:] = A[1,:] + loc[1]
A[2,:] = A[2,:] + loc[2]
self.location[:,J] = A
changed_ind = J
# correct the children
(ch,) = np.where(parents == index)
for i in I[ch]:
(up,) = np.where(~np.isnan(self.connection[i,:]))
(down,) = np.where(~np.isnan(self.connection[:,index]))
J = np.intersect1d(up,down)
A = self.location[:,J]
loc = self.location[:,i]
A[0,:] = A[0,:] - loc[0]
A[1,:] = A[1,:] - loc[1]
A[2,:] = A[2,:] - loc[2]
r1 = origin - loc
r2 = r1 + displace
M = self.scalar_rotation_matrix_to_map_two_vector( r1, r2)
A = np.dot(M,A)
A[0,:] = A[0,:] + loc[0]
A[1,:] = A[1,:] + loc[1]
A[2,:] = A[2,:] + loc[2]
self.location[:,J] = A
changed_ind = np.append(changed_ind, J)
self.location[:,index] = origin + displace
self.set_distance_from_root()
self.set_distance_from_parent()
for i in changed_ind:
(J,) = np.where(~np.isnan(self.connection[:,i]))
self.connection[J,i] = self.distance_from_parent[i]
self.set_branch_angle()
self.set_global_angle()
self.set_local_angle()
self.set_features()
def scalar_rotation_matrix_to_map_two_vector(self, v1, v2):
r1 = LA.norm(v1,2)
norm1 = v1/r1
r2 = LA.norm(v2,2)
normal2 = v2/r2
a = sum(normal2*norm1)
theta = -np.arccos(a)
normal2 = normal2 - a*norm1
norm2 = normal2/LA.norm(normal2,2)
cross = np.cross(norm1, norm2)
B = np.zeros([3,3])
B[:,0] = norm1
B[:,1] = norm2
B[:,2] = cross
A = np.eye(3)
A[0,0] = np.cos(theta)
A[1,0] = - np.sin(theta)
A[0,1] = np.sin(theta)
A[1,1] = np.cos(theta)
return (r2/r1) * np.dot(np.dot(B,A),inv(B))
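    # Property sketch (an assumption, not original code): the returned matrix M
    # maps v1 onto v2, combining a rotation with the scale factor |v2|/|v1|, e.g.
    #     M = neuron.scalar_rotation_matrix_to_map_two_vector(
    #         np.array([1., 0., 0.]), np.array([0., 2., 0.]))
    #     np.dot(M, np.array([1., 0., 0.]))   # -> approximately [0., 2., 0.]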
def change_diameter(self,index,ratio):
"""
Change the diameter of one node in the neuron updates the attribute accordingly.
Parameters:
___________
index: the index of node in no_soma_list to change its diameter
ratio: the radius of new node is the radius of current node times ratio
"""
node = self.nodes_list[index]
node.r = ratio*node.r
r = node.r
self.diameter[index] = r
self._update_attribute(self._list_for_local_update(node))
self.set_features()
def rescale_toward_end(self,node, rescale):
"""
        Rescale the part of the neuron from the node toward the end nodes.
input
-----
node : `Node` class
            the node of the neuron; the locations of all the nodes that descend from it will be changed.
rescale : positive float
The value to rescale the part of the neuron.
"""
index = self.get_index_for_no_soma_node(node)
(I,) = np.where(~np.isnan(self.connection[:,index]))
A = self.location[:,I]
loc = self.xyz(node)
A[0,:] = A[0,:] - loc[0]
A[1,:] = A[1,:] - loc[1]
A[2,:] = A[2,:] - loc[2]
A = rescale*A
A[0,:] = A[0,:] + loc[0]
A[1,:] = A[1,:] + loc[1]
A[2,:] = A[2,:] + loc[2]
self.location[:,I] = A
self.set_distance_from_root()
self.set_distance_from_parent()
I = I.tolist()
I.remove(index)
I = np.array(I,dtype = int)
self.connection[:,I] *= rescale
self.set_branch_angle()
self.set_global_angle()
self.set_local_angle()
self.set_features()
def rotate(self, node, matrix):
"""
Rotate the neuron around the parent of the node with the given matrix.
The attribute to update:
location
distance_from_root
branch_angle
angle_global
local_angle
"""
# set of nodes under parent of node
par = node.parent
(I,) = np.where(~np.isnan(self.connection[:,self.get_index_for_no_soma_node(par)]))
A = self.location[:,I]
loc = self.xyz(par)
A[0,:] = A[0,:] - loc[0]
A[1,:] = A[1,:] - loc[1]
A[2,:] = A[2,:] - loc[2]
A = np.dot(matrix, A)
A[0,:] = A[0,:] + loc[0]
A[1,:] = A[1,:] + loc[1]
A[2,:] = A[2,:] + loc[2]
self.location[:,I] = A
self.set_distance_from_root()
self.set_branch_angle()
self.set_global_angle()
self.set_local_angle()
self.set_features()
def rotate_from_branch(self, node, matrix):
branch_index = self.get_index_for_no_soma_node(node.parent)
(I,) = np.where(~np.isnan(self.connection[:,self.get_index_for_no_soma_node(node)]))
#I = np.append(I, branch_index)
A = self.location[:,I]
loc = self.xyz(node.parent)
A[0,:] = A[0,:] - loc[0]
A[1,:] = A[1,:] - loc[1]
A[2,:] = A[2,:] - loc[2]
A = np.dot(matrix, A)
A[0,:] = A[0,:] + loc[0]
A[1,:] = A[1,:] + loc[1]
A[2,:] = A[2,:] + loc[2]
self.location[:,I] = A
self.set_distance_from_root()
self.set_branch_angle()
self.set_global_angle()
self.set_local_angle()
self.set_features()
def remove_node(self, index):
"""
Removes a non-soma node from the neuron and updates the features
Parameters
-----------
Node : the index of the node in the no_soma_list
the node should be one of the end-points, otherwise gives an error
"""
self.n_node -= 1
node = self.nodes_list[index]
parent_index = self.get_index_for_no_soma_node(node.parent)
# details of the removed node for return
p = node.parent
node.parent.remove_child(node)
l = self.location[:,index] - self.location[:,parent_index]
r = self.diameter[index]/self.diameter[parent_index]
self.location = np.delete(self.location,index, axis = 1)
self.nodes_list.remove(node)
self.branch_order = np.delete(self.branch_order,index)
new_parent_index = self.get_index_for_no_soma_node(p)
self.branch_order[new_parent_index] -= 1
self.diameter = np.delete(self.diameter,index)
#self.frustum = np.delete(self.frustum,index)
#self.rall_ratio = np.delete(self.rall_ratio,index)
self.distance_from_root = np.delete(self.distance_from_root,index)
self.distance_from_parent = np.delete(self.distance_from_parent,index)
#self.slope = np.delete(self.slope,index)
self.branch_angle = np.delete(self.branch_angle,index, axis = 1)
self.global_angle = np.delete(self.global_angle,index)
self.local_angle = np.delete(self.local_angle,index)
self.connection = np.delete(self.connection,index, axis = 0)
self.connection = np.delete(self.connection,index, axis = 1)
self.parent_index = np.delete(self.parent_index,index)
I = np.where(self.parent_index > index)
self.parent_index[I] -= 1
self.child_index = np.delete(self.child_index,index,axis = 1)
I , J = np.where(self.child_index > index)
self.child_index[I,J] -= 1
        if p.type != 'soma':
if len(p.children) == 1:
self.branch_angle[0,new_parent_index] = np.nan
self.branch_angle[1,new_parent_index] = np.nan
self.branch_angle[2,new_parent_index] = np.nan
gol, loc = self.calculate_node_angles(self.nodes_list[new_parent_index])
self.child_index[:,new_parent_index] = np.array([self.get_index_for_no_soma_node(p.children[0]), np.nan])
self.local_angle[new_parent_index] = loc
if len(p.children) == 0:
self.local_angle[new_parent_index] = np.nan
self.child_index[:,new_parent_index] = np.array([np.nan, np.nan])
#self.sholl_r = None # the position of the jumps for sholl analysis
#self.sholl_n = None # the value at the jumping (the same size as self.sholl_x)
self.set_ext_red_list()
self.set_features()
return p, l, r
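    # Round-trip sketch (hypothetical index, not original code): the returned
    # triple is exactly what extend_node expects, so a removal can be undone:
    #     parent, offset, ratio = neuron.remove_node(25)
    #     neuron.extend_node(parent, offset, ratio)
    # which is how extension/reduction proposals can be applied and reverted.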
def extend_node(self,parent,location,ratio):
"""
Extend the neuron by adding one end point and updates the attribute for the new neuron.
Parameters:
___________
Parent: the node that the extended node attached to
location: the xyz of new node is the sum of location and xyz of parent
ratio: the radius of new node is the radius of parent times ratio
"""
self.n_node += 1
        if parent == 'soma':
parent = self.root
n = Node()
in_par = self.get_index_for_no_soma_node(parent)
n.type = 'apical'
R = ratio * self.diameter[in_par]
n.parent = parent
parent.add_child(n)
self.location = np.append(self.location, (self.location[:,in_par] + location).reshape([3,1]), axis = 1)
self.diameter = np.append(self.diameter, R)
self.nodes_list.append(n)
#self.frustum = np.append(self.frustum,np.nan)
self.branch_order = np.append(self.branch_order ,0)
self.branch_order[self.get_index_for_no_soma_node(parent)] += 1
#self.rall_ratio = np.append(self.rall_ratio ,np.nan)
self.distance_from_root = np.append(self.distance_from_root,np.nan)
self.distance_from_parent = np.append(self.distance_from_parent ,np.nan)
#self.slope = np.append(self.slope ,np.nan)
if(self.branch_angle.shape[1] == 0):
self.branch_angle = np.nan*np.ones([3,1])
else:
self.branch_angle = np.append(self.branch_angle, np.nan*np.ones([3,1]), axis = 1)
self.global_angle = np.append(self.global_angle ,np.nan)
self.local_angle = np.append(self.local_angle ,np.nan)
l = self.connection.shape[0]
I = np.nan*np.zeros([1,l])
(J,) = np.where(~np.isnan(self.connection[self.get_index_for_no_soma_node(parent),:]))
I[0,J] = self.connection[self.get_index_for_no_soma_node(parent),J]
self.connection = np.append(self.connection, I , axis = 0)
self.connection = np.append(self.connection, np.nan*np.zeros([l+1,1]), axis = 1)
self.connection[l,l] = LA.norm(location,2)
self.parent_index = np.append(self.parent_index, self.get_index_for_no_soma_node(parent))
self.child_index = np.append(self.child_index,np.array([np.nan, np.nan]).reshape(2,1), axis = 1)
        if parent.type != 'soma':
if(len(parent.children) == 1):
self.child_index[:,self.get_index_for_no_soma_node(parent)] = np.array([self.get_index_for_no_soma_node(n), np.nan])
if(len(parent.children) == 2):
self.child_index[1,self.get_index_for_no_soma_node(parent)] = self.get_index_for_no_soma_node(n)
update_list = self._list_for_local_update(n)
self._update_attribute(update_list)
self.set_ext_red_list()
self.set_features()
return self.get_index_for_no_soma_node(n)
def add_extra_node(self, node):
        print 1  # placeholder: adding an extra node is not implemented yet
def slide(self, moving_node_index, no_branch_node_index):
"""
"""
# adjust nodes
moving_node = self.nodes_list[moving_node_index]
no_branch_node = self.nodes_list[no_branch_node_index]
parent_moving_node = moving_node.parent
parent_moving_node_index = self.get_index_for_no_soma_node(parent_moving_node)
parent_moving_node.remove_child(moving_node)
moving_node.parent = no_branch_node
no_branch_node.add_child(moving_node)
# adjust parent_index and child_index
self.parent_index[moving_node_index] = no_branch_node_index
a = self.child_index[:,parent_moving_node_index]
if(self.branch_order[parent_moving_node_index] == 2):
if(a[0] == moving_node_index):
self.child_index[:,parent_moving_node_index] = np.array([a[1],np.nan])
if(a[1] == moving_node_index):
self.child_index[:,parent_moving_node_index] = np.array([a[0],np.nan])
if(self.branch_order[parent_moving_node_index] == 1):
self.child_index[:,parent_moving_node_index] = np.array([np.nan,np.nan])
self.branch_order[parent_moving_node_index] -= 1
#self.set_parent()
if(self.branch_order[no_branch_node_index] == 1):
a = self.child_index[:,no_branch_node_index]
self.child_index[:,no_branch_node_index] = np.array([a[0],moving_node_index])
if(self.branch_order[no_branch_node_index] == 0):
self.child_index[:,no_branch_node_index] = np.array([moving_node_index,np.nan])
self.branch_order[no_branch_node_index] += 1
# adjust location
(segment,) = np.where(~np.isnan(self.connection[:,moving_node_index]))
self.location[0,segment] += self.location[0,no_branch_node_index] - self.location[0,parent_moving_node_index]
self.location[1,segment] += self.location[1,no_branch_node_index] - self.location[1,parent_moving_node_index]
self.location[2,segment] += self.location[2,no_branch_node_index] - self.location[2,parent_moving_node_index]
# adjust connection
(up_ind,) = np.where(~np.isnan(self.connection[parent_moving_node_index,:]))
self.connection[np.ix_(segment,up_ind)] = np.nan
(down_ind,) = np.where(~np.isnan(self.connection[no_branch_node_index,:]))
a = self.distance_from_parent[down_ind].reshape([1,len(down_ind)])
A = np.repeat(a,len(segment),axis = 0)
self.connection[np.ix_(segment,down_ind)] = A
self.set_ext_red_list()
self.set_distance_from_root()
self.set_distance_from_parent()
self.set_branch_angle()
self.set_global_angle()
self.set_local_angle()
#self.set_frustum()
self.set_features()
def horizental_stretch(self, node_index, parent_node, scale):
(up,) = np.where(~np.isnan(self.connection[node_index,:]))
(down,) = np.where(~np.isnan(self.connection[:,parent_node]))
I = np.intersect1d(up,down)
A = self.location[:,I]
loc = self.location[:,parent_node]
A[0,:] = A[0,:] - loc[0]
A[1,:] = A[1,:] - loc[1]
A[2,:] = A[2,:] - loc[2]
r = self.location[:,node_index] - loc
r = r/LA.norm(r,2)
A = scale*A +(1-scale)*(np.dot(np.expand_dims(r,axis = 1),np.expand_dims(np.dot(r,A),axis = 0)))
A[0,:] = A[0,:] + loc[0]
A[1,:] = A[1,:] + loc[1]
A[2,:] = A[2,:] + loc[2]
self.location[:,I] = A
self.set_distance_from_root()
self.set_distance_from_parent()
for i in I:
(J,) = np.where(~np.isnan(self.connection[:,i]))
self.connection[J,i] = self.distance_from_parent[i]
self.set_branch_angle()
self.set_global_angle()
self.set_local_angle()
self.set_features()
def vertical_stretch(self, node_index, parent_node, scale):
(up,) = np.where(~np.isnan(self.connection[node_index,:]))
(down,) = np.where(~np.isnan(self.connection[:,parent_node]))
I = np.intersect1d(up,down)
A = self.location[:,I]
loc = self.location[:,parent_node]
A[0,:] = A[0,:] - loc[0]
A[1,:] = A[1,:] - loc[1]
A[2,:] = A[2,:] - loc[2]
r = self.location[:,node_index] - loc
new_loc = -(1-scale)*(r)
r = r/LA.norm(r,2)
A = A -(1-scale)*(np.dot(np.expand_dims(r,axis = 1),np.expand_dims(np.dot(r,A),axis = 0)))
A[0,:] = A[0,:] + loc[0]
A[1,:] = A[1,:] + loc[1]
A[2,:] = A[2,:] + loc[2]
self.location[:,I] = A
(T,) = np.where(~np.isnan(self.connection[:,node_index]))
T = list(T)
T.remove(node_index)
A = self.location[:,T]
A[0,:] += new_loc[0]
A[1,:] += new_loc[1]
A[2,:] += new_loc[2]
self.location[:,T] = A
self.set_distance_from_root()
self.set_distance_from_parent()
T = np.array(T)
for i in np.append(T,I):
(J,) = np.where(~np.isnan(self.connection[:,i]))
self.connection[J,i] = self.distance_from_parent[i]
self.set_branch_angle()
self.set_global_angle()
self.set_local_angle()
self.set_features()
def sinusidal(self, node_index, parent_index, hight, n_vertical, n_horizental):
"""
NOT READY
"""
(up,) = np.where(~np.isnan(self.connection[node_index,:]))
(down,) = np.where(~np.isnan(self.connection[:,parent_node]))
I = np.intersect1d(up,down)
A = self.location[:,I]
loc = self.location[:,parent_node]
A[0,:] = A[0,:] - loc[0]
A[1,:] = A[1,:] - loc[1]
A[2,:] = A[2,:] - loc[2]
r = self.location[:,node_index] - loc
new_loc = -(1-scale)*(r)
r = r/LA.norm(r,2)
A = A -(1-scale)*(np.dot(np.expand_dims(r,axis = 1),np.expand_dims(np.dot(r,A),axis = 0)))
A[0,:] = A[0,:] + loc[0]
A[1,:] = A[1,:] + loc[1]
A[2,:] = A[2,:] + loc[2]
self.location[:,I] = A
(T,) = np.where(~np.isnan(self.connection[:,node_index]))
T = list(T)
T.remove(node_index)
A = self.location[:,T]
A[0,:] += new_loc[0]
A[1,:] += new_loc[1]
A[2,:] += new_loc[2]
self.location[:,T] = A
self.set_distance_from_root()
self.set_distance_from_parent()
T = np.array(T)
for i in np.append(T,I):
(J,) = np.where(~np.isnan(self.connection[:,i]))
self.connection[J,i] = self.distance_from_parent[i]
self.set_branch_angle()
self.set_global_angle()
self.set_local_angle()
self.set_features()
def get_root(self):
"""
Obtain the root Node
Returns
-------
root : :class:`Node`
"""
return self.__root
def is_root(self, node):
"""
Check whether a Node is the root Node
Parameters
-----------
node : :class:`Node`
Node to be check if root
Returns
--------
is_root : boolean
True is the queried Node is the root, False otherwise
"""
if node.parent is None:
return True
else:
return False
def is_leaf(self, node):
"""
Check whether a Node is a leaf Node, i.e., a Node without children
Parameters
-----------
node : :class:`Node`
Node to be check if leaf Node
Returns
--------
is_leaf : boolean
True is the queried Node is a leaf, False otherwise
"""
if len(node.children) == 0:
return True
else:
return False
def is_branch(self, node):
"""
Check whether a Node is a branch Node, i.e., a Node with two children
Parameters
-----------
node : :class:`Node`
Node to be check if branch Node
Returns
--------
is_leaf : boolean
True is the queried Node is a branch, False otherwise
"""
if len(node.children) == 2:
return True
else:
return False
def find_root(self, node):
if node.parent is not None:
node = self.find_root(node.parent)
return node
def add_node_with_parent(self, node, parent):
"""
Add a Node to the tree under a specific parent Node
Parameters
-----------
node : :class:`Node`
Node to be added
parent : :class:`Node`
parent Node of the newly added Node
"""
node.parent = parent
if parent is not None:
parent.add_child(node)
self.add_node(node)
def add_node(self,node):
self.nodes_list.append(node)
def read_swc(self, input_file):
"""
Read the swc file and fill the attributes accordingly.
The assigned attributes are:
n_soma
n_node
nodes_list
location
type
diameter
parent_index
child_index
"""
self.n_soma = 0
self.nodes_list = []
self.location = np.array([0, 0, 0] ).reshape(3,1)
self.type = 1
self.parent_index = np.array([0])
child_index = csr_matrix((2,1000000))
f = open(input_file, 'r')
B = True
try:
for line in f:
if not line.startswith('#'):
split = line.split()
index = int(split[0].rstrip())
swc_type = int(split[1].rstrip())
x = float(split[2].rstrip())
y = float(split[3].rstrip())
z = float(split[4].rstrip())
radius = float(split[5].rstrip())
parent_index = int(split[6].rstrip())
if(parent_index == -1):
self.n_soma += 1
x_root = x
y_root = y
z_root = z
self.diameter = radius
else:
if(swc_type == 1):
self.n_soma += 1
self.location = np.append(self.location, np.array([x - x_root, y - y_root, z - z_root]).reshape(3,1), axis = 1)
self.diameter = np.append(self.diameter, radius)
self.type = np.append(self.type, swc_type)
self.parent_index = np.append(self.parent_index, parent_index - 1)
if(parent_index != 1):
if(child_index[0,parent_index-1]==0):
child_index[0,parent_index-1] = index-1
else:
child_index[1,parent_index-1] = index-1
node = Node()
node.xyz = np.array([x,y,z])
node.r = np.array([radius])
node.set_type(swc_type)
if(parent_index == -1):
self.add_node(node)
self.root = node
else:
self.add_node_with_parent(node,self.nodes_list[parent_index-1])
self.n_node = len(self.nodes_list)
a = child_index[:,0:self.n_node]
#a = a -1
a = a.toarray()
a[a==0] = np.nan
self.child_index = a
except:
print('deleted Neuron')
def read_swc_matrix(self, input_file):
"""
Read the an swc matrix and fill the attributes accordingly.
The assigned attributes are:
n_soma
n_node
nodes_list
location
type
diameter
parent_index
child_index
"""
self.n_soma = 0
self.nodes_list = []
self.location = np.array([0, 0, 0] ).reshape(3,1)
self.type = 1
self.parent_index = np.array([0])
child_index = csr_matrix((2,1000000))
n_node = input_file.shape[0]
for line in range(n_node):
index = input_file[line,0]
swc_type = input_file[line,1]
x = input_file[line,2]
y = input_file[line,3]
z = input_file[line,4]
radius = input_file[line,5]
parent_index = int(input_file[line,6])
if(parent_index == -1):
self.n_soma += 1
x_root = x
y_root = y
z_root = z
self.diameter = radius
else:
if(swc_type == 1):
self.n_soma += 1
self.location = np.append(self.location, np.array([x - x_root, y - y_root, z - z_root]).reshape(3,1), axis = 1)
self.diameter = np.append(self.diameter, radius)
self.type = np.append(self.type, swc_type)
self.parent_index = np.append(self.parent_index, parent_index - 1)
if(parent_index != 1):
if(child_index[0,parent_index-1]==0):
child_index[0,parent_index-1] = index-1
else:
child_index[1,parent_index-1] = index-1
node = Node()
node.xyz = np.array([x,y,z])
node.r = np.array([radius])
node.set_type(swc_type)
if(parent_index == -1):
self.add_node(node)
self.root = node
else:
self.add_node_with_parent(node,self.nodes_list[parent_index-1])
self.n_node = len(self.nodes_list)
a = child_index[:,0:self.n_node]
a = a.toarray()
a[a==0] = np.nan
self.child_index = a
def get_swc(self):
swc = np.zeros([self.n_node,7])
remain = [self.root]
index = np.array([-1])
for i in range(self.n_node):
n = remain[0]
swc[i,0] = i+1
swc[i,1] = n.set_type_from_name()
ind = self.get_index_for_no_soma_node(n)
if(ind > self.n_soma):
swc[i,2] = self.location[0,ind]
swc[i,3] = self.location[1,ind]
swc[i,4] = self.location[2,ind]
swc[i,5] = self.diameter[ind]
swc[i,6] = index[0]
else:
swc[i,2] = n.xyz[0]
swc[i,3] = n.xyz[1]
swc[i,4] = n.xyz[2]
swc[i,5] = n.r
swc[i,6] = 1
for m in n.children:
remain.append(m)
index = np.append(index,i+1)
remain = remain[1:]
index = index[1:]
swc[0,6] = -1
return swc
def write_swc(self, input_file):
"""
Used to write an SWC file from a morphology stored in this
:class:`Neuron`.
"""
writer = open(input_file, 'w')
swc = self.get_swc()
for i in range(swc.shape[0]):
string = (str(swc[i,0])+' '+str(swc[i,1]) + ' ' + str(swc[i,2]) +
' ' + str(swc[i,3]) + ' ' + str(swc[i,4]) +
' ' + str(swc[i,5]) + ' ' + str(swc[i,6]))
writer.write(string + '\n')
writer.flush()
writer.close()
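    # Round-trip sketch (assumption; 'out.swc' is a hypothetical path):
    #     swc_matrix = neuron.get_swc()              # n_node x 7 array
    #     neuron.write_swc('out.swc')                # plain-text SWC file on disk
    #     clone = Neuron(file_format='swc', input_file='out.swc')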
def get_random_branching_or_end_node(self):
(b,) = np.where(self.branch_order[self.n_soma:] == 2)
(e,) = np.where(self.branch_order[self.n_soma:] == 0)
I = np.append(b,e)
if(len(I) == 0):
n = Node()
n.type = 'empty'
else:
I += self.n_soma
            i = int(np.floor(len(I)*np.random.rand()))
n = self.nodes_list[I[i]]
return n
def get_random_no_soma_node(self):
l = self.n_node - self.n_soma
return self.nodes_list[(np.floor(l*np.random.rand()) + self.n_soma).astype(int)]
def get_random_branching_node(self):
"""
Return one of the branching point in the neuron.
dependency:
self.branch_order
self.nodes_list
self.n_soma
"""
(I,) = np.where(self.branch_order[self.n_soma:] == 2)
if(len(I) == 0):
n = Node()
n.type = 'empty'
else:
I += self.n_soma
            i = int(np.floor(len(I)*np.random.rand()))
n = self.nodes_list[I[i]]
return n
def get_random_order_one_node_not_in_certain_index(self, index):
"""
Return one of the order one point in the neuron.
dependency:
self.branch_order
self.nodes_list
self.n_soma
"""
(I,) = np.where(self.branch_order == 1)
I = I[I>=self.n_soma]
I = np.setdiff1d(I,index)
if(len(I) == 0):
n = Node()
n.type = 'empty'
else:
            i = int(np.floor(len(I)*np.random.rand()))
n = self.nodes_list[I[i]]
return n
def get_random_non_branch_node_not_in_certain_index(self, index):
"""
Return one of the order one point in the neuron.
dependency:
self.branch_order
self.nodes_list
self.n_soma
"""
(I,) = np.where(self.branch_order != 2)
I = I[I>=self.n_soma]
I = np.setdiff1d(I,index)
if(len(I) == 0):
n = Node()
n.type = 'empty'
else:
            i = int(np.floor(len(I)*np.random.rand()))
n = self.nodes_list[I[i]]
return n
def is_soma(self):
if(self.n_node == self.n_soma):
return True
else:
return False
def set_nodes_values(self):
i = 0
for n in self.nodes_list:
n.xyz = self.location[:,i]
n.r = self.diameter[i]
i += 1
def show_features(self,size_x = 15,size_y = 17 ,bin_size = 20):
n = 6
m = 2
plt.figure(figsize=(size_x,size_y))
plt.subplot(n,m,1)
a = self.global_angle
b = plt.hist(a[~np.isnan(a)],bins = bin_size,color = 'g')
#plt.xlabel('angle (radian)')
plt.ylabel('density')
plt.title('Global angles')
plt.subplot(n,m,2)
a = self.local_angle
b = plt.hist(a[~np.isnan(a)],bins = bin_size,color = 'g')
#plt.xlabel('angle (radian)')
plt.ylabel('density')
plt.title('Local angles')
plt.subplot(n,m,3)
        plt.title('Neuronal/Euclidean distance from root')
a = self.features['ratio_euclidian_neuronal']
plt.hist(a[~np.isnan(a)],bins = bin_size ,color = 'g')
#plt.xlabel('ratio')
plt.ylabel('density')
plt.subplot(n,m,4)
plt.hist(self.distance_from_parent,bins = bin_size,color = 'g')
plt.title('Distance from parent')
#plt.xlabel('distance (um)')
plt.ylabel('density')
plt.subplot(n,m,5)
plt.hist(self.distance_from_root,bins = bin_size)
#plt.xlabel('distance (um)')
plt.ylabel('density')
plt.title('Distance from soma')
plt.subplot(n,m,6)
a = self.features['branch_angle']
plt.hist(a[~np.isnan(a)],bins = bin_size)
#plt.xlabel('angle (radian)')
plt.ylabel('density')
plt.title('Angle at the branching points')
plt.subplot(n,m,7)
a = self.features['curvature']
plt.hist(a,bins = bin_size)
#plt.xlabel('angle (radian)')
plt.ylabel('density')
plt.title('curvature')
plt.subplot(n,m,8)
a = self.features['neural_important']
plt.hist(a,bins = bin_size)
#plt.xlabel('angle (radian)')
plt.ylabel('density')
        plt.title('length of neural segments')
plt.subplot(n,m,9)
a = self.features['ratio_neural_euclidian_important']
plt.hist(a,bins = bin_size)
#plt.xlabel('angle (radian)')
plt.ylabel('density')
        plt.title('ratio of neural to euclidean distance for segments')
#fig, ax = plt.subplots(n,m,6)
plt.subplot(n,m,10)
ind = np.arange(4)
width = 0.35
plt.bar(ind,(self.n_node,self.features['Nbranch'],self.features['initial_segments'],self.features['discrepancy_space']),color='r');
#plt.title('Numberical features')
#plt.set_xticks(ind + width)
plt.xticks(ind,('Nnodes', 'Nbranch', 'Ninitials', 'discrepancy'))
class Node(object):
"""
Node class for each nodes in the Neuron class.
each node has parent (another node), children (None, one or more nodes), radius,
    Euclidean coordinates and type
The children are in a list
"""
def __init__(self):
self.parent = None
self.children = []
self.r = np.array([0.])
self.xyz = np.array([0.,0.,0.])
self.type = None # it can be soma, dendrite, axon, basal, apical
def get_parent(self):
"""
Return the parent Node of this one.
Returns
-------
parent : :class:`Node`
In case of the root, None is returned. Otherwise a :class:`Node` is
returned
"""
return self.__parent
def set_parent(self, parent):
"""
Set the parent Node of a given other Node
Parameters
----------
Node : :class:`Node`
"""
self.__parent = parent
def get_children(self):
"""
Return the children nodes of this one (if any)
Returns
-------
children : list :class:`Node`
In case of a leaf an empty list is returned
"""
return self.__children
def set_children(self, children):
"""
Set the children nodes of this one
Parameters
----------
children: list :class:`Node`
"""
self.__children = children
def get_radius(self):
"""
Returns
-------
radius : float
"""
return self.r
def set_radius(self, radius):
self.r = radius
def getxyz(self):
"""
Returns
-------
radius : float
"""
return self.xyz
def setxyz(self, xyz):
self.xyz = xyz
def set_type(self,index):
if(index == 0):
self.type = 'undefined'
elif(index == 1):
self.type = 'soma'
elif(index == 2):
self.type = 'axon'
elif(index == 3):
self.type = 'basal'
elif(index == 4):
self.type = 'apical'
def set_type_from_name(self):
if(self.type == 'undefined'):
return 0
if(self.type == 'soma'):
return 1
if(self.type == 'axon'):
return 2
if(self.type == 'basal'):
return 3
if(self.type == 'apical'):
return 4
def add_child(self, child_node):
"""
add a child to the children list of a given Node
Parameters
-----------
Node : :class:`Node`
"""
self.children.append(child_node)
def remove_child(self, child):
"""
Remove a child Node from the list of children of a specific Node
Parameters
-----------
Node : :class:`Node`
Raises a ValueError if the given child is not present.
"""
self.children.remove(child)
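# Hedged usage sketch of the Node API above (illustrative only):
#
#     root = Node()
#     root.set_type(1)                      # soma
#     child = Node()
#     child.set_type(3)                     # basal dendrite
#     child.setxyz(np.array([1., 0., 0.]))
#     child.set_parent(root)
#     root.add_child(child)
#     assert child in root.get_children()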
| mit | 2,015,097,327,887,328,300 | 36.984816 | 515 | 0.546885 | false |
tooxie/blatt | blatt/api/resources.py | 1 | 2976 | # -*- coding: utf-8 -*-
from flask.ext.restful import fields
from blatt.api.fields import (RelatedResource, Geolocation, InstanceURL,
ForeignKeyField)
from blatt.api.restful import BlattResource
from blatt.persistence import session, Publication, Article, Journalist, Media
class ArticleResource(BlattResource):
def get_filters(self):
return (
('publication_pk', 'publication'),
('section_pk', 'section'),
)
def get_fields(self):
return {
'pk': fields.String,
'title': fields.String,
'deck': fields.String(''),
'lead': fields.String(''),
'body': fields.String,
'url': fields.String,
'geolocation': Geolocation('latitude', 'longitude'),
'publication_date': fields.DateTime,
'publication': ForeignKeyField('publications', ['name']),
'section': ForeignKeyField('sections', ['name']),
'authors': ForeignKeyField('journalists', ['name']),
'media': ForeignKeyField('/media/'),
}
def filter(self, queryset, options):
j_id = options.get('journalist')
if j_id:
queryset = queryset.filter(Article.authors.any(pk=j_id))
return queryset
def get_one(self, art_id, options=None):
return session.query(Article).get(art_id)
def get_all(self):
return session.query(Article)
class PublicationResource(BlattResource):
def get_fields(self):
return {
'pk': fields.String,
'name': fields.String,
'slug': fields.String,
'logo': fields.String,
'website': fields.String(attribute='url'),
'url': InstanceURL('publications'),
'articles': RelatedResource('/articles/', 'publication'),
}
def get_one(self, pub_id, options=None):
return session.query(Publication).get(pub_id)
def get_all(self):
return session.query(Publication)
class JournalistResource(BlattResource):
def get_fields(self):
return {
'pk': fields.String,
'name': fields.String,
'url': InstanceURL('journalists'),
'articles': RelatedResource('/articles/', 'journalist'),
}
def get_one(self, j_id, options=None):
return session.query(Journalist).get(j_id)
def get_all(self):
return session.query(Journalist)
class MediaResource(BlattResource):
def get_fields(self):
return {
'url': InstanceURL('media'),
'image': fields.String(attribute='url'),
'caption': fields.String,
'article': ForeignKeyField('articles'),
'photographer': ForeignKeyField('photographers'),
}
def get_one(self, media_id, options=None):
return session.query(Media).get(media_id)
def get_all(self):
return session.query(Media)
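# Hedged wiring sketch (assumption: the surrounding blatt package exposes a
# flask-restful Api instance; the routes below are illustrative, not defined here):
#
#     api.add_resource(ArticleResource, '/articles/', '/articles/<art_id>')
#     api.add_resource(PublicationResource, '/publications/', '/publications/<pub_id>')
#     api.add_resource(JournalistResource, '/journalists/', '/journalists/<j_id>')
#     api.add_resource(MediaResource, '/media/', '/media/<media_id>')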
| agpl-3.0 | 4,782,923,363,249,512,000 | 30 | 78 | 0.580309 | false |
nrudenko/anarcho | anarchoApp/anarcho/routes/apps.py | 1 | 2840 | from anarcho.models.token import Token
from anarcho.models.user import User
from flask.json import jsonify
import os
from anarcho import storage_worker, app, db
from anarcho.serializer import serialize
from anarcho.access_manager import app_permissions, login_required
from flask.helpers import send_file
from anarcho.models.application import Application
from anarcho.models.user_app import UserApp
from flask import request, Response, make_response, g
from anarcho.storage_workers import LocalStorageWorker
@app.route('/api/apps', methods=['GET'])
@login_required
def apps_list():
user_apps = UserApp.query.filter_by(user_id=g.user.id).all()
return serialize(user_apps)
@app.route('/api/apps', methods=['POST'])
@login_required
def app_create():
name = request.json['name']
new_app = Application(name)
user_app = UserApp(g.user.id, new_app.app_key, "w")
db.session.add(new_app)
db.session.add(user_app)
db.session.commit()
api_user = User(name='guest_{0}'.format(name))
db.session.add(api_user)
db.session.commit()
api_user_token = Token(api_user)
api_user_app = UserApp(api_user.id, new_app.app_key, "u")
db.session.add(api_user_app)
db.session.add(api_user_token)
db.session.commit()
return serialize(user_app)
@app.route('/api/apps/<app_key>', methods=['DELETE'])
@login_required
@app_permissions(permissions=["w"])
def remove_application(app_key):
application = Application.query.filter_by(app_key=app_key).first()
if application:
db.session.delete(application)
storage_worker.remove_app(application)
db.session.commit()
return Response(status=200)
@app.route('/api/apps/<app_key>', methods=['GET'])
@login_required
def app_info(app_key):
application = UserApp.query.filter_by(app_key=app_key, user_id=g.user.id).first()
if application:
application.icon_url = storage_worker.get_icon_link(app_key)
return serialize(application)
return make_response('{"error":"app_not_found"}', 404)
@app.route('/api/icon/<app_key>', methods=['GET'])
def get_icon(app_key=None):
if isinstance(storage_worker, LocalStorageWorker):
icon_path = storage_worker.get_icon_path(app_key)
if os.path.exists(icon_path):
return send_file(icon_path)
return Response(status=404)
@app.route('/api/apps/<app_key>/plugin', methods=['GET'])
@login_required
@app_permissions(permissions=['w', 'r'])
def get_plugin_config(app_key):
user_app = UserApp.query.filter_by(app_key=app_key, permission='u').first()
if user_app is None:
return make_response('{"error":"app_not_found"}', 404)
user = user_app.user
response = {
'host': app.config['PUBLIC_HOST'],
'app_key': app_key,
'api_token': user.token.auth_token
}
return jsonify(response)
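# Hedged usage sketch (assumption: exercised through Flask's test client with a
# valid token; the exact auth header consumed by login_required is an assumption):
#
#     client = app.test_client()
#     resp = client.get('/api/apps/%s/plugin' % app_key,
#                       headers={'X-Auth-Token': token})
#     assert resp.status_code in (200, 404)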
| mit | 7,978,958,861,447,964,000 | 30.910112 | 85 | 0.687324 | false |
JaySon-Huang/misc | leetoj/235.py | 1 | 1040 | # Lowest Common Ancestor of a Binary Search Tree
# https://leetcode.com/problems/lowest-common-ancestor-of-a-binary-search-tree
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
if root is None:
return None
if root.val > p.val and root.val > q.val:
return self.lowestCommonAncestor(
root.left, p, q
)
elif root.val < p.val and root.val < q.val:
return self.lowestCommonAncestor(
root.right, p, q
)
else:
# Since the tree is a BST, when the root's value lies between the values
# of p and q, the root is the lowest common ancestor of p and q.
return root
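# Hedged usage sketch (assumption: nodes wired manually for a quick local check):
#
#     root, left, right = TreeNode(6), TreeNode(2), TreeNode(8)
#     root.left, root.right = left, right
#     assert Solution().lowestCommonAncestor(root, left, right) is root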
| mit | -5,334,315,556,457,048,000 | 28.151515 | 78 | 0.544699 | false |
ahuarte47/QGIS | tests/src/python/test_qgspallabeling_base.py | 1 | 16956 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsPalLabeling: base suite setup
From build dir, run: ctest -R PyQgsPalLabelingBase -V
See <qgis-src-dir>/tests/testdata/labeling/README.rst for description.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import collections
__author__ = 'Larry Shaffer'
__date__ = '07/09/2013'
__copyright__ = 'Copyright 2013, The QGIS Project'
import qgis # NOQA
import os
import sys
import datetime
import glob
import shutil
from qgis.PyQt.QtCore import QSize, qDebug, Qt
from qgis.PyQt.QtGui import QFont, QColor
from qgis.core import (
QgsCoordinateReferenceSystem,
QgsCoordinateTransform,
QgsDataSourceUri,
QgsGeometry,
QgsLabelingEngineSettings,
QgsProject,
QgsMapSettings,
QgsPalLabeling,
QgsPalLayerSettings,
QgsProviderRegistry,
QgsStringReplacementCollection,
QgsVectorLayer,
QgsVectorLayerSimpleLabeling,
QgsMultiRenderChecker,
QgsUnitTypes
)
from qgis.testing import start_app, unittest
from qgis.testing.mocked import get_iface
from utilities import (
unitTestDataPath,
getTempfilePath,
renderMapToImage,
loadTestFonts,
getTestFont,
openInBrowserTab
)
start_app(sys.platform != 'darwin') # No cleanup on mac os x, it crashes the pallabelingcanvas test on exit
FONTSLOADED = loadTestFonts()
PALREPORT = 'PAL_REPORT' in os.environ
PALREPORTS = {}
# noinspection PyPep8Naming,PyShadowingNames
class TestQgsPalLabeling(unittest.TestCase):
_TestDataDir = unitTestDataPath()
_PalDataDir = os.path.join(_TestDataDir, 'labeling')
_TestFont = getTestFont() # Roman at 12 pt
""":type: QFont"""
_MapRegistry = None
""":type: QgsProject"""
_MapSettings = None
""":type: QgsMapSettings"""
_Canvas = None
""":type: QgsMapCanvas"""
_BaseSetup = False
@classmethod
def setUpClass(cls):
"""Run before all tests"""
# qgis iface
cls._Iface = get_iface()
cls._Canvas = cls._Iface.mapCanvas()
cls._TestFunction = ''
cls._TestGroup = ''
cls._TestGroupPrefix = ''
cls._TestGroupAbbr = ''
cls._TestGroupCanvasAbbr = ''
cls._TestImage = ''
cls._TestMapSettings = None
cls._Mismatch = 0
cls._Mismatches = dict()
cls._ColorTol = 0
cls._ColorTols = dict()
# initialize class MapRegistry, Canvas, MapRenderer, Map and PAL
# noinspection PyArgumentList
cls._MapRegistry = QgsProject.instance()
cls._MapSettings = cls.getBaseMapSettings()
osize = cls._MapSettings.outputSize()
cls._Canvas.resize(QSize(osize.width(), osize.height())) # necessary?
# set color to match render test comparisons background
cls._Canvas.setCanvasColor(cls._MapSettings.backgroundColor())
cls.setDefaultEngineSettings()
cls._BaseSetup = True
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
def setUp(self):
"""Run before each test."""
TestQgsPalLabeling.setDefaultEngineSettings()
self.lyr = self.defaultLayerSettings()
@classmethod
def setDefaultEngineSettings(cls):
"""Restore default settings for pal labeling"""
settings = QgsLabelingEngineSettings()
settings.setPlacementVersion(QgsLabelingEngineSettings.PlacementEngineVersion2)
cls._MapSettings.setLabelingEngineSettings(settings)
@classmethod
def removeAllLayers(cls):
cls._MapSettings.setLayers([])
cls._MapRegistry.removeAllMapLayers()
@classmethod
def removeMapLayer(cls, layer):
if layer is None:
return
lyr_id = layer.id()
cls._MapRegistry.removeMapLayer(lyr_id)
ms_layers = cls._MapSettings.layers()
if layer in ms_layers:
ms_layers.remove(layer)
cls._MapSettings.setLayers(ms_layers)
@classmethod
def getTestFont(cls):
return QFont(cls._TestFont)
@classmethod
def loadFeatureLayer(cls, table, chk=False):
if chk and cls._MapRegistry.mapLayersByName(table):
return
vlayer = QgsVectorLayer('{}/{}.geojson'.format(cls._PalDataDir, table), table, 'ogr')
assert vlayer.isValid()
# .qml should contain only style for symbology
vlayer.loadNamedStyle(os.path.join(cls._PalDataDir,
'{0}.qml'.format(table)))
# qDebug('render_lyr = {0}'.format(repr(vlayer)))
cls._MapRegistry.addMapLayer(vlayer)
# place new layer on top of render stack
render_lyrs = [vlayer]
render_lyrs.extend(cls._MapSettings.layers())
# qDebug('render_lyrs = {0}'.format(repr(render_lyrs)))
cls._MapSettings.setLayers(render_lyrs)
# zoom to aoi
cls._MapSettings.setExtent(cls.aoiExtent())
cls._Canvas.zoomToFullExtent()
return vlayer
@classmethod
def aoiExtent(cls):
"""Area of interest extent, which matches output aspect ratio"""
aoilayer = QgsVectorLayer('{}/aoi.geojson'.format(cls._PalDataDir), 'aoi', 'ogr')
assert aoilayer.isValid()
return aoilayer.extent()
@classmethod
def getBaseMapSettings(cls):
"""
:rtype: QgsMapSettings
"""
ms = QgsMapSettings()
crs = QgsCoordinateReferenceSystem()
""":type: QgsCoordinateReferenceSystem"""
# default for labeling test data: WGS 84 / UTM zone 13N
crs.createFromSrid(32613)
ms.setBackgroundColor(QColor(152, 219, 249))
ms.setOutputSize(QSize(420, 280))
ms.setOutputDpi(72)
ms.setFlag(QgsMapSettings.Antialiasing, True)
ms.setFlag(QgsMapSettings.UseAdvancedEffects, False)
ms.setFlag(QgsMapSettings.ForceVectorOutput, False) # no caching?
ms.setDestinationCrs(crs)
ms.setExtent(cls.aoiExtent())
return ms
def cloneMapSettings(self, oms):
"""
:param QgsMapSettings oms: Other QgsMapSettings
:rtype: QgsMapSettings
"""
ms = QgsMapSettings()
ms.setBackgroundColor(oms.backgroundColor())
ms.setOutputSize(oms.outputSize())
ms.setOutputDpi(oms.outputDpi())
ms.setFlags(oms.flags())
ms.setDestinationCrs(oms.destinationCrs())
ms.setExtent(oms.extent())
ms.setOutputImageFormat(oms.outputImageFormat())
ms.setLabelingEngineSettings(oms.labelingEngineSettings())
ms.setLayers(oms.layers())
return ms
def configTest(self, prefix, abbr):
"""Call in setUp() function of test subclass"""
self._TestGroupPrefix = prefix
self._TestGroupAbbr = abbr
# insert test's Class.function marker into debug output stream
# this helps visually track down the start of a test's debug output
testid = self.id().split('.')
self._TestGroup = testid[1]
self._TestFunction = testid[2]
testheader = '\n#####_____ {0}.{1} _____#####\n'.\
format(self._TestGroup, self._TestFunction)
qDebug(testheader)
# define the shorthand name of the test (to minimize file name length)
self._Test = '{0}_{1}'.format(self._TestGroupAbbr,
self._TestFunction.replace('test_', ''))
def defaultLayerSettings(self):
lyr = QgsPalLayerSettings()
lyr.fieldName = 'text' # default in test data sources
font = self.getTestFont()
font.setPointSize(32)
format = lyr.format()
format.setFont(font)
format.setNamedStyle('Roman')
format.setSize(32)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.buffer().setJoinStyle(Qt.BevelJoin)
lyr.setFormat(format)
return lyr
@staticmethod
def settingsDict(lyr):
"""Return a dict of layer-level labeling settings
.. note:: QgsPalLayerSettings is not a QObject, so we can not collect
current object properties, and the public properties of the C++ obj
can't be listed with __dict__ or vars(). So, we sniff them out relative
to their naming convention (camelCase), as reported by dir().
"""
res = {}
for attr in dir(lyr):
if attr[0].islower() and not attr.startswith("__"):
value = getattr(lyr, attr)
if isinstance(value, (QgsGeometry, QgsStringReplacementCollection, QgsCoordinateTransform)):
continue # ignore these objects
if not isinstance(value, collections.Callable):
res[attr] = value
return res
def controlImagePath(self, grpprefix=''):
if not grpprefix:
grpprefix = self._TestGroupPrefix
return os.path.join(self._TestDataDir, 'control_images',
'expected_' + grpprefix,
self._Test, self._Test + '.png')
def saveControlImage(self, tmpimg=''):
# don't save control images for RenderVsOtherOutput (Vs) tests, since
# those control images belong to a different test result
if ('PAL_CONTROL_IMAGE' not in os.environ or
'Vs' in self._TestGroup):
return
imgpath = self.controlImagePath()
testdir = os.path.dirname(imgpath)
if not os.path.exists(testdir):
os.makedirs(testdir)
imgbasepath = \
os.path.join(testdir,
os.path.splitext(os.path.basename(imgpath))[0])
# remove any existing control images
for f in glob.glob(imgbasepath + '.*'):
if os.path.exists(f):
os.remove(f)
qDebug('Control image for {0}.{1}'.format(self._TestGroup,
self._TestFunction))
if not tmpimg:
# TODO: this can be deprecated, when per-base-test-class rendering
# in checkTest() is verified OK for all classes
qDebug('Rendering control to: {0}'.format(imgpath))
ms = self._MapSettings # class settings
""":type: QgsMapSettings"""
settings_type = 'Class'
if self._TestMapSettings is not None:
ms = self._TestMapSettings # per test settings
settings_type = 'Test'
qDebug('MapSettings type: {0}'.format(settings_type))
img = renderMapToImage(ms, parallel=False)
""":type: QImage"""
tmpimg = getTempfilePath('png')
if not img.save(tmpimg, 'png'):
os.unlink(tmpimg)
raise OSError('Control not created for: {0}'.format(imgpath))
if tmpimg and os.path.exists(tmpimg):
qDebug('Copying control to: {0}'.format(imgpath))
shutil.copyfile(tmpimg, imgpath)
else:
raise OSError('Control not copied to: {0}'.format(imgpath))
def renderCheck(self, mismatch=0, colortol=0, imgpath='', grpprefix=''):
"""Check rendered map canvas or existing image against control image
:mismatch: number of pixels different from control, and still valid
:colortol: maximum difference for each color component including alpha
:imgpath: existing image; if present, skips rendering canvas
:grpprefix: compare test image/rendering against different test group
"""
if not grpprefix:
grpprefix = self._TestGroupPrefix
chk = QgsMultiRenderChecker()
chk.setControlPathPrefix('expected_' + grpprefix)
chk.setControlName(self._Test)
if imgpath:
chk.setRenderedImage(imgpath)
ms = self._MapSettings # class settings
if self._TestMapSettings is not None:
ms = self._TestMapSettings # per test settings
chk.setMapSettings(ms)
chk.setColorTolerance(colortol)
# noinspection PyUnusedLocal
res = chk.runTest(self._Test, mismatch)
if PALREPORT and not res: # don't report OK checks
testname = self._TestGroup + ' . ' + self._Test
PALREPORTS[testname] = chk.report()
msg = '\nRender check failed for "{0}"'.format(self._Test)
return res, msg
def checkTest(self, **kwargs):
"""Intended to be overridden in subclasses"""
pass
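# Hedged subclassing sketch (assumption: concrete suites override checkTest()
# and delegate to renderCheck(), as the canvas/composer label suites do):
#
#     class TestPointPlacement(TestQgsPalLabeling):
#         def checkTest(self, **kwargs):
#             res, msg = self.renderCheck(mismatch=self._Mismatch,
#                                         colortol=self._ColorTol)
#             self.assertTrue(res, msg)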
class TestPALConfig(TestQgsPalLabeling):
@classmethod
def setUpClass(cls):
TestQgsPalLabeling.setUpClass()
cls.layer = TestQgsPalLabeling.loadFeatureLayer('point')
@classmethod
def tearDownClass(cls):
cls.removeMapLayer(cls.layer)
def setUp(self):
"""Run before each test."""
self.configTest('pal_base', 'base')
def tearDown(self):
"""Run after each test."""
pass
def test_default_pal_disabled(self):
# Verify PAL labeling is disabled for layer by default
palset = self.layer.customProperty('labeling', '')
msg = '\nExpected: Empty string\nGot: {0}'.format(palset)
self.assertEqual(palset, '', msg)
def test_settings_no_labeling(self):
self.layer.setLabeling(None)
self.assertEqual(None, self.layer.labeling())
def test_layer_pal_activated(self):
# Verify, via engine, that PAL labeling can be activated for layer
lyr = self.defaultLayerSettings()
self.layer.setLabeling(QgsVectorLayerSimpleLabeling(lyr))
msg = '\nLayer labeling not activated, as reported by labelingEngine'
self.assertTrue(QgsPalLabeling.staticWillUseLayer(self.layer), msg)
def test_write_read_settings(self):
# Verify written PAL settings are same when read from layer
# load and write default test settings
lyr1 = self.defaultLayerSettings()
lyr1dict = self.settingsDict(lyr1)
# print(lyr1dict)
self.layer.setLabeling(QgsVectorLayerSimpleLabeling(lyr1))
# read settings
lyr2 = self.layer.labeling().settings()
lyr2dict = self.settingsDict(lyr2)
# print(lyr2dict)
msg = '\nLayer settings read not same as settings written'
self.assertDictEqual(lyr1dict, lyr2dict, msg)
def test_default_partials_labels_enabled(self):
# Verify ShowingPartialsLabels is enabled for PAL by default
engine_settings = QgsLabelingEngineSettings()
self.assertTrue(engine_settings.testFlag(QgsLabelingEngineSettings.UsePartialCandidates))
def test_partials_labels_activate(self):
engine_settings = QgsLabelingEngineSettings()
# Enable partials labels
engine_settings.setFlag(QgsLabelingEngineSettings.UsePartialCandidates)
self.assertTrue(engine_settings.testFlag(QgsLabelingEngineSettings.UsePartialCandidates))
def test_partials_labels_deactivate(self):
engine_settings = QgsLabelingEngineSettings()
# Disable partials labels
engine_settings.setFlag(QgsLabelingEngineSettings.UsePartialCandidates, False)
self.assertFalse(engine_settings.testFlag(QgsLabelingEngineSettings.UsePartialCandidates))
# noinspection PyPep8Naming,PyShadowingNames
def runSuite(module, tests):
"""This allows for a list of test names to be selectively run.
Also, ensures unittest verbose output comes at end, after debug output"""
loader = unittest.defaultTestLoader
if 'PAL_SUITE' in os.environ:
if tests:
suite = loader.loadTestsFromNames(tests, module)
else:
raise Exception(
"\n\n####__ 'PAL_SUITE' set, but no tests specified __####\n")
else:
suite = loader.loadTestsFromModule(module)
verb = 2 if 'PAL_VERBOSE' in os.environ else 0
res = unittest.TextTestRunner(verbosity=verb).run(suite)
if PALREPORTS:
teststamp = 'PAL Test Report: ' + \
datetime.datetime.now().strftime('%Y-%m-%d %X')
report = '<html><head><title>{0}</title></head><body>'.format(teststamp)
report += '\n<h2>Failed Tests: {0}</h2>'.format(len(PALREPORTS))
for k, v in list(PALREPORTS.items()):
report += '\n<h3>{0}</h3>\n{1}'.format(k, v)
report += '</body></html>'
tmp_name = getTempfilePath('html')
with open(tmp_name, 'wt') as report_file:
report_file.write(report)
openInBrowserTab('file://' + tmp_name)
return res
if __name__ == '__main__':
# NOTE: unless PAL_SUITE env var is set all test class methods will be run
# ex: 'TestGroup(Point|Line|Curved|Polygon|Feature).test_method'
suite = [
'TestPALConfig.test_write_read_settings'
]
res = runSuite(sys.modules[__name__], suite)
sys.exit(not res.wasSuccessful())
| gpl-2.0 | 5,134,637,443,551,917,000 | 34.84778 | 108 | 0.631517 | false |
Paulloz/godot | modules/mono/build_scripts/make_android_mono_config.py | 8 | 1446 | def generate_compressed_config(config_src, output_dir):
import os.path
# Source file
with open(os.path.join(output_dir, "android_mono_config.gen.cpp"), "w") as cpp:
with open(config_src, "rb") as f:
buf = f.read()
decompr_size = len(buf)
import zlib
buf = zlib.compress(buf)
compr_size = len(buf)
bytes_seq_str = ""
for i, buf_idx in enumerate(range(compr_size)):
if i > 0:
bytes_seq_str += ", "
bytes_seq_str += str(buf[buf_idx])
cpp.write(
"""/* THIS FILE IS GENERATED DO NOT EDIT */
#include "android_mono_config.h"
#ifdef ANDROID_ENABLED
#include "core/io/compression.h"
namespace {
// config
static const int config_compressed_size = %d;
static const int config_uncompressed_size = %d;
static const unsigned char config_compressed_data[] = { %s };
} // namespace
String get_godot_android_mono_config() {
Vector<uint8_t> data;
data.resize(config_uncompressed_size);
uint8_t* w = data.ptrw();
Compression::decompress(w, config_uncompressed_size, config_compressed_data,
config_compressed_size, Compression::MODE_DEFLATE);
String s;
if (s.parse_utf8((const char *)w, data.size())) {
ERR_FAIL_V(String());
}
return s;
}
#endif // ANDROID_ENABLED
"""
% (compr_size, decompr_size, bytes_seq_str)
)
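# Hedged invocation sketch (assumption: called from the Mono module's SCons
# build scripts; the paths below are illustrative):
#
#     generate_compressed_config("mono_config.xml", "modules/mono/")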
| mit | -3,742,968,783,921,075,000 | 25.777778 | 83 | 0.591978 | false |
carthach/essentia | test/src/unittests/filters/test_movingaverage.py | 1 | 2087 | #!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestMovingAverage(TestCase):
def testRegression(self):
# check moving average for size = 6 and input signal of 10 elements
input = [1]*10
expected = [ 1./6, 2./6, 3./6, 4./6., 5./6., 1., 1., 1., 1., 1. ]
self.assertAlmostEqualVector(MovingAverage(size=6)(input), expected)
def testOneByOne(self):
# we compare here that filtering an array all at once or the samples
# one by one will yield the same result
input = [1]*10
expected = [ 1./4, 2./4, 3./4, 1., 1., 1., 1., 1., 1., 1. ]
filt = MovingAverage(size=4)
self.assertAlmostEqualVector(filt(input), expected)
# need to reset the filter here!!
filt.reset()
result = []
for sample in input:
result += list(filt([sample]))
self.assertAlmostEqualVector(result, expected)
def testZero(self):
self.assertEqualVector(MovingAverage()(zeros(20)), zeros(20))
def testInvalidParam(self):
self.assertConfigureFails(MovingAverage(), {'size': 0})
def testEmpty(self):
self.assertEqualVector(MovingAverage()([]), [])
suite = allTests(TestMovingAverage)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
| agpl-3.0 | 8,423,727,334,861,143,000 | 28.394366 | 79 | 0.662674 | false |
stuarthodgson/cocotb | cocotb/regression.py | 2 | 16392 | ''' Copyright (c) 2013 Potential Ventures Ltd
Copyright (c) 2013 SolarFlare Communications Inc
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Potential Ventures Ltd,
SolarFlare Communications Inc nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''
"""
All things relating to regression capabilities
"""
import time
import logging
import inspect
from itertools import product
import sys
import os
# For autodocumentation don't need the extension modules
if "SPHINX_BUILD" in os.environ:
simulator = None
else:
import simulator
# Optional support for coverage collection of testbench files
coverage = None
if "COVERAGE" in os.environ:
try:
import coverage
except ImportError as e:
msg = ("Coverage collection requested but coverage module not availble"
"\n"
"Import error was: %s\n" % repr(e))
sys.stderr.write(msg)
import cocotb
import cocotb.ANSI as ANSI
from cocotb.log import SimLog
from cocotb.result import TestError, TestFailure, TestSuccess, SimFailure
from cocotb.xunit_reporter import XUnitReporter
def _my_import(name):
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
class RegressionManager(object):
"""Encapsulates all regression capability into a single place"""
def __init__(self, root_name, modules, tests=None):
"""
Args:
root_name (str): name of the root handle in the simulation
modules (list): A list of python module names to run
Kwargs:
tests (str): comma-separated list of specific tests to run (optional)
"""
self._queue = []
self._root_name = root_name
self._dut = None
self._modules = modules
self._functions = tests
self._running_test = None
self._cov = None
self.log = SimLog("cocotb.regression")
def initialise(self):
self.ntests = 0
self.count = 1
self.skipped = 0
self.failures = 0
self.xunit = XUnitReporter()
self.xunit.add_testsuite(name="all", tests=repr(self.ntests),
package="all")
if coverage is not None:
self.log.info("Enabling coverage collection of Python code")
self._cov = coverage.coverage(branch=True, omit=["*cocotb*"])
self._cov.start()
self._dut = cocotb.handle.SimHandle(simulator.get_root_handle(
self._root_name))
if self._dut is None:
raise AttributeError("Can not find Root Handle (%s)" %
self._root_name)
# Auto discovery
for module_name in self._modules:
module = _my_import(module_name)
if self._functions:
# Specific functions specified, don't auto discover
for test in self._functions.rsplit(','):
if not hasattr(module, test):
raise AttributeError("Test %s doesn't exist in %s" %
(test, module_name))
self._queue.append(getattr(module, test)(self._dut))
self.ntests += 1
break
for thing in vars(module).values():
if hasattr(thing, "im_test"):
try:
test = thing(self._dut)
skip = test.skip
except TestError:
skip = True
self.log.warning("Failed to initialise test %s" %
thing.name)
if skip:
self.log.info("Skipping test %s" % thing.name)
self.xunit.add_testcase(name=thing.name,
classname=module_name,
time="0.0")
self.xunit.add_skipped()
self.skipped += 1
else:
self._queue.append(test)
self.ntests += 1
self._queue.sort(key=lambda test: "%s.%s" %
(test.module, test.funcname))
for valid_tests in self._queue:
self.log.info("Found test %s.%s" %
(valid_tests.module,
valid_tests.funcname))
def tear_down(self):
"""It's the end of the world as we know it"""
if self.failures:
self.log.error("Failed %d out of %d tests (%d skipped)" %
(self.failures, self.count - 1, self.skipped))
else:
self.log.info("Passed %d tests (%d skipped)" %
(self.count - 1, self.skipped))
if self._cov:
self._cov.stop()
self.log.info("Writing coverage data")
self._cov.save()
self._cov.html_report()
self.log.info("Shutting down...")
self.xunit.write()
simulator.stop_simulator()
def next_test(self):
"""Get the next test to run"""
if not self._queue:
return None
return self._queue.pop(0)
def handle_result(self, result):
"""Handle a test result
Dumps result to XML and schedules the next test (if any)
Args: result (TestComplete exception)
"""
self.xunit.add_testcase(name=self._running_test.funcname,
classname=self._running_test.module,
time=repr(time.time() -
self._running_test.start_time))
running_test_funcname = self._running_test.funcname
# Helper for logging result
def _result_was():
result_was = ("%s (result was %s)" %
(running_test_funcname, result.__class__.__name__))
return result_was
if (isinstance(result, TestSuccess) and
not self._running_test.expect_fail and
not self._running_test.expect_error):
self.log.info("Test Passed: %s" % running_test_funcname)
elif (isinstance(result, TestFailure) and
self._running_test.expect_fail):
self.log.info("Test failed as expected: " + _result_was())
elif (isinstance(result, TestSuccess) and
self._running_test.expect_error):
self.log.error("Test passed but we expected an error: " +
_result_was())
self.xunit.add_failure(stdout=repr(str(result)),
stderr="\n".join(
self._running_test.error_messages))
self.failures += 1
elif isinstance(result, TestSuccess):
self.log.error("Test passed but we expected a failure: " +
_result_was())
self.xunit.add_failure(stdout=repr(str(result)),
stderr="\n".join(
self._running_test.error_messages))
self.failures += 1
elif isinstance(result, TestError) and self._running_test.expect_error:
self.log.info("Test errored as expected: " + _result_was())
elif isinstance(result, SimFailure):
if self._running_test.expect_error:
self.log.info("Test errored as expected: " + _result_was())
else:
self.log.error("Test error has lead to simulator shuttting us "
"down")
self.failures += 1
self.tear_down()
return
else:
self.log.error("Test Failed: " + _result_was())
self.xunit.add_failure(stdout=repr(str(result)),
stderr="\n".join(
self._running_test.error_messages))
self.failures += 1
self.execute()
def execute(self):
self._running_test = cocotb.regression.next_test()
if self._running_test:
# Want this to stand out a little bit
self.log.info("%sRunning test %d/%d:%s %s" %
(ANSI.BLUE_BG + ANSI.BLACK_FG,
self.count, self.ntests,
ANSI.DEFAULT_FG + ANSI.DEFAULT_BG,
self._running_test.funcname))
if self.count == 1:
test = cocotb.scheduler.add(self._running_test)
else:
test = cocotb.scheduler.new_test(self._running_test)
self.count += 1
else:
self.tear_down()
def _create_test(function, name, documentation, mod, *args, **kwargs):
"""Factory function to create tests, avoids late binding
Creates a test dynamically. The test will call the supplied
function with the supplied arguments.
Args:
function: (function) the test function to run
name: (string) the name of the test
documentation: (string) the docstring for the test
mod: (module) the module this function belongs to
*args: remaining args to pass to test function
Kwargs:
**kwargs: passed to the test function
Returns:
decorated test function
"""
def _my_test(dut):
yield function(dut, *args, **kwargs)
_my_test.__name__ = name
_my_test.__doc__ = documentation
_my_test.__module__ = mod.__name__
return cocotb.test()(_my_test)
class TestFactory(object):
"""
Used to automatically generate tests.
Assuming we have a common test function that will run a test. This test
function will take keyword arguments (for example generators for each of
the input interfaces) and generate tests that call the supplied function.
This Factory allows us to generate sets of tests based on the different
permutations of the possible arguments to the test function.
For example if we have a module that takes backpressure and idles and
have some packet generation routines gen_a and gen_b.
>>> tf = TestFactory(run_test)
>>> tf.add_option('data_in', [gen_a, gen_b])
>>> tf.add_option('backpressure', [None, random_backpressure])
>>> tf.add_option('idles', [None, random_idles])
>>> tf.generate_tests()
We would get the following tests:
* gen_a with no backpressure and no idles
* gen_a with no backpressure and random_idles
* gen_a with random_backpressure and no idles
* gen_a with random_backpressure and random_idles
* gen_b with no backpressure and no idles
* gen_b with no backpressure and random_idles
* gen_b with random_backpressure and no idles
* gen_b with random_backpressure and random_idles
The tests are appended to the calling module for auto-discovery.
Tests are simply named test_function_N. The docstring for the test (hence
the test description) includes the name and description of each generator.
"""
def __init__(self, test_function, *args, **kwargs):
"""
Args:
test_function (function): the function that executes a test.
Must take 'dut' as the first argument.
*args: Remaining args are passed directly to the test function.
Note that these arguments are not varied. An argument that
varies with each test must be a keyword argument to the
test function.
*kwargs: Remaining kwargs are passed directly to the test function.
Note that these arguments are not varied. An argument that
varies with each test must be a keyword argument to the
test function.
"""
if not isinstance(test_function, cocotb.coroutine):
raise TypeError("TestFactory requires a cocotb coroutine")
self.test_function = test_function
self.name = self.test_function._func.__name__
self.args = args
self.kwargs_constant = kwargs
self.kwargs = {}
def add_option(self, name, optionlist):
"""Add a named option to the test.
Args:
name (string): name of the option. passed to test as a keyword
argument
optionlist (list): A list of possible options for this test knob
"""
self.kwargs[name] = optionlist
def generate_tests(self, prefix="", postfix=""):
"""
Generates an exhaustive set of tests using the Cartesian product of the
possible keyword arguments.
The generated tests are appended to the namespace of the calling
module.
Args:
prefix: Text string to append to start of test_function name
when naming generated test cases. This allows reuse of
a single test_function with multiple TestFactories without
name clashes.
postfix: Text string to append to end of test_function name
when naming generated test cases. This allows reuse of
a single test_function with multiple TestFactories without
name clashes.
"""
frm = inspect.stack()[1]
mod = inspect.getmodule(frm[0])
d = self.kwargs
for index, testoptions in enumerate((
dict(zip(d, v)) for v in
product(*d.values())
)):
name = "%s%s%s_%03d" % (prefix, self.name, postfix, index + 1)
doc = "Automatically generated test\n\n"
for optname, optvalue in testoptions.items():
if callable(optvalue):
if not optvalue.__doc__:
desc = "No docstring supplied"
else:
desc = optvalue.__doc__.split('\n')[0]
doc += "\t%s: %s (%s)\n" % (optname, optvalue.__name__,
desc)
else:
doc += "\t%s: %s\n" % (optname, repr(optvalue))
cocotb.log.debug("Adding generated test \"%s\" to module \"%s\"" %
(name, mod.__name__))
kwargs = {}
kwargs.update(self.kwargs_constant)
kwargs.update(testoptions)
if hasattr(mod, name):
cocotb.log.error("Overwriting %s in module %s. "
"This causes previously defined testcase "
"not to be run. Consider setting/changing "
"name_postfix" % (name, mod))
setattr(mod, name, _create_test(self.test_function, name, doc, mod,
*self.args, **kwargs))
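# Hedged usage sketch (assumption: placed in a cocotb test module so the
# generated tests are auto-discovered; run_test, gen_a, gen_b, Timer and
# random_backpressure are illustrative names, not defined in this file):
#
#     @cocotb.coroutine
#     def run_test(dut, data_in=None, backpressure=None):
#         yield Timer(1)
#
#     factory = TestFactory(run_test)
#     factory.add_option("data_in", [gen_a, gen_b])
#     factory.add_option("backpressure", [None, random_backpressure])
#     factory.generate_tests()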
| bsd-3-clause | -2,425,440,293,360,589,000 | 37.843602 | 79 | 0.555698 | false |
aametwally/cloudSACA | cloudSACA_Azure/EHPC-Server.py | 1 | 3280 | #!/usr/bin/python
from Crypto.PublicKey import RSA
from Crypto import Random
import sys,socket,threading,subprocess,os,base64,xml.dom.minidom
import config,PBS,Response,Request,ServerJob as Job
JOBS={}
EOM="\n\n###"
def startServer():
port=int(config.port)
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
s.bind(('',port))
s.listen(5)
print "Server started at " + str(port)
return s
def importKey():
file=open(config.publicKey,'r')
st = "".join(file.readlines())
key = RSA.importKey(st)
return key
def validReq(req):
key =importKey()
decrypted=key.decrypt(base64.decodestring(req["sec"]))
if (req["JobID"] + req["Owner"]==decrypted):
return True
else: return False
def HandleClient(clientsock):
name = threading.currentThread().getName()
print name, ' Started.............'
global EOM
chunks=[]
while 1:
buf = clientsock.recv(2048)
chunks.append(str(buf))
if (EOM in chunks[-1]):
msg= "".join(chunks)[:-5]
if (msg=="TEST: HELLO"):
return
req =Request.parseRequest(msg)
if (not validReq(req)):
Response.sendData(clientsock,"Invalid Request")
print "invalid request"
clientsock.close()
return
if (req["requestType"]=="SUBMIT"):
job=Request.parseJob(msg)
global JOBS
if (req["Owner"]=="system" or req["Owner"]=="utils"):
res= PBS.run(job["command"],req["JobID"])
if req["Owner"]=="system":
Response.sendData(clientsock,"Done")
else:
Response.sendData(clientsock,res)
elif req["Owner"]=="ubuntu":
res= PBS.run("su ubuntu -c '"+job["command"]+"'",req["JobID"])
Response.sendData(clientsock,res)
elif req["Owner"]=="ehpcuser":
res= PBS.run("su ehpcuser -c '"+job["command"]+"'",req["JobID"])
Response.sendData(clientsock,res)
else:
print "command:" + job["command"]
# print "inputs:" + job["inputs"]
c = PBS.AddFileWrapper("direct",job["command"],job["inputs"],job["outputs"])
id= PBS.runAsPBSJob(req["Owner"],req["JobID"],c)
Response.sendData(clientsock,'recieved:'+id)
clientsock.close()
elif (req["requestType"]=="STATUS"):
status=Job.checkPBSstatus(req["JobID"])
Response.sendData(clientsock,Job.perpareJobStatusMessage(req,status))
elif (req["requestType"]=="FETCH"):
OutputList = Request.getOutputList(msg)
response = Response.generateResponse(msg,OutputList)
Response.sendData(clientsock,response)
elif (req["requestType"]=='UPLOAD'):
UploadDict = Request.parseUpload(msg)
for key, value in UploadDict.iteritems():
createFile = open(key, "wb")
createFile.write(value)
createFile.close()
break
s=startServer()
i=0
while 1:
clientsock,clientaddr=s.accept()
i+=1
print 'Got connection from ' , clientsock.getpeername()
t=threading.Thread(target=HandleClient,args=[clientsock], name="Thread #" + str(i))
t.start()
sys.exit(0)
| gpl-3.0 | -6,639,096,629,647,590,000 | 32.814433 | 104 | 0.602439 | false |
derekstavis/bluntly | vendor/github.com/youtube/vitess/py/vttest/vt_processes.py | 1 | 7105 | # Copyright 2015 Google Inc. All Rights Reserved.
"""Starts the vtcombo process."""
import json
import logging
import os
import socket
import subprocess
import time
import urllib
from google.protobuf import text_format
from vttest import environment
class VtProcess(object):
"""Base class for a vt process, vtcombo only now."""
START_RETRIES = 5
def __init__(self, name, directory, binary, port_name):
self.name = name
self.directory = directory
self.binary = binary
self.extraparams = []
self.port_name = port_name
self.process = None
def wait_start(self):
"""Start the process and wait for it to respond on HTTP."""
for _ in xrange(0, self.START_RETRIES):
self.port = environment.get_port(self.port_name)
if environment.get_protocol() == 'grpc':
self.grpc_port = environment.get_port(self.port_name, protocol='grpc')
else:
self.grpc_port = None
logs_subdirectory = environment.get_logs_directory(self.directory)
cmd = [
self.binary,
'-port', '%u' % self.port,
'-log_dir', logs_subdirectory,
]
if environment.get_protocol() == 'grpc':
cmd.extend(['-grpc_port', '%u' % self.grpc_port])
cmd.extend(self.extraparams)
logging.info('Starting process: %s', cmd)
stdout = os.path.join(logs_subdirectory, '%s.%d.log' %
(self.name, self.port))
self.stdout = open(stdout, 'w')
self.process = subprocess.Popen(cmd,
stdout=self.stdout,
stderr=subprocess.STDOUT)
timeout = time.time() + 60.0
while time.time() < timeout:
if environment.process_is_healthy(
self.name, self.addr()) and self.get_vars():
logging.info('%s started.', self.name)
return
elif self.process.poll() is not None:
logging.error('%s process exited prematurely.', self.name)
break
time.sleep(0.3)
logging.error('cannot start %s process on time: %s ',
self.name, socket.getfqdn())
self.kill()
raise Exception('Failed %d times to run %s' % (
self.START_RETRIES,
self.name))
def addr(self):
"""Return the host:port of the process."""
return '%s:%u' % (socket.getfqdn(), self.port)
def grpc_addr(self):
"""Get the grpc address of the process.
Returns:
the grpc host:port of the process.
Only call this if environment.get_protocol() == 'grpc'.
"""
return '%s:%u' % (socket.getfqdn(), self.grpc_port)
def get_vars(self):
"""Return the debug vars."""
data = None
try:
url = 'http://%s/debug/vars' % self.addr()
f = urllib.urlopen(url)
data = f.read()
f.close()
except IOError:
return None
try:
return json.loads(data)
except ValueError:
logging.error('%s', data)
raise
def kill(self):
"""Kill the process."""
# These will proceed without error even if the process is already gone.
self.process.terminate()
def wait(self):
"""Wait for the process to end."""
self.process.wait()
class VtcomboProcess(VtProcess):
"""Represents a vtcombo subprocess."""
QUERYSERVER_PARAMETERS = [
'-queryserver-config-pool-size', '4',
'-queryserver-config-query-timeout', '300',
'-queryserver-config-schema-reload-time', '60',
'-queryserver-config-stream-pool-size', '4',
'-queryserver-config-transaction-cap', '4',
'-queryserver-config-transaction-timeout', '300',
'-queryserver-config-txpool-timeout', '300',
]
def __init__(self, directory, topology, mysql_db, schema_dir, charset,
web_dir=None, web_dir2=None):
VtProcess.__init__(self, 'vtcombo-%s' % os.environ['USER'], directory,
environment.vtcombo_binary, port_name='vtcombo')
self.extraparams = [
'-db-config-app-charset', charset,
'-db-config-app-uname', mysql_db.username(),
'-db-config-app-pass', mysql_db.password(),
'-db-config-dba-charset', charset,
'-db-config-dba-uname', mysql_db.username(),
'-db-config-dba-pass', mysql_db.password(),
'-proto_topo', text_format.MessageToString(topology, as_one_line=True),
'-mycnf_server_id', '1',
'-mycnf_socket_file', mysql_db.unix_socket(),
'-normalize_queries',
] + self.QUERYSERVER_PARAMETERS + environment.extra_vtcombo_parameters()
if schema_dir:
self.extraparams.extend(['-schema_dir', schema_dir])
if web_dir:
self.extraparams.extend(['-web_dir', web_dir])
if web_dir2:
self.extraparams.extend(['-web_dir2', web_dir2])
if mysql_db.unix_socket():
self.extraparams.extend(
['-db-config-app-unixsocket', mysql_db.unix_socket(),
'-db-config-dba-unixsocket', mysql_db.unix_socket()])
else:
self.extraparams.extend(
['-db-config-app-host', mysql_db.hostname(),
'-db-config-app-port', str(mysql_db.port()),
'-db-config-dba-host', mysql_db.hostname(),
'-db-config-dba-port', str(mysql_db.port())])
vtcombo_process = None
def start_vt_processes(directory, topology, mysql_db, schema_dir,
charset='utf8', web_dir=None, web_dir2=None):
"""Start the vt processes.
Args:
directory: the toplevel directory for the processes (logs, ...)
topology: a vttest.VTTestTopology object.
mysql_db: an instance of the mysql_db.MySqlDB class.
schema_dir: the directory that contains the schema / vschema.
charset: the character set for the database connections.
web_dir: contains the web app for vtctld side of vtcombo.
web_dir2: contains the web app for vtctld side of vtcombo.
"""
global vtcombo_process
logging.info('start_vt_processes(directory=%s,vtcombo_binary=%s)',
directory, environment.vtcombo_binary)
vtcombo_process = VtcomboProcess(directory, topology, mysql_db, schema_dir,
charset, web_dir=web_dir, web_dir2=web_dir2)
vtcombo_process.wait_start()
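# Hedged usage sketch (assumption: the caller has prepared a
# vttest_pb2.VTTestTopology proto and a running mysql_db instance,
# as run_local_database does):
#
#     start_vt_processes('/tmp/vttest', topology, db, schema_dir='/path/to/schema')
#     try:
#         pass  # run queries against vtcombo_process.grpc_addr()
#     finally:
#         kill_and_wait_vt_processes()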
def kill_vt_processes():
"""Call kill() on all processes."""
logging.info('kill_vt_processes()')
if vtcombo_process:
vtcombo_process.kill()
def wait_vt_processes():
"""Call wait() on all processes."""
logging.info('wait_vt_processes()')
if vtcombo_process:
vtcombo_process.wait()
def kill_and_wait_vt_processes():
"""Call kill() and then wait() on all processes."""
kill_vt_processes()
wait_vt_processes()
# wait_step is a helper for looping until a condition is true.
# use as follow:
# timeout = 10
# while True:
# if done:
# break
# timeout = utils.wait_step('condition', timeout)
def wait_step(msg, timeout, sleep_time=1.0):
timeout -= sleep_time
if timeout <= 0:
raise Exception("timeout waiting for condition '%s'" % msg)
logging.debug("Sleeping for %f seconds waiting for condition '%s'",
sleep_time, msg)
time.sleep(sleep_time)
return timeout
| mit | -4,666,950,833,032,231,000 | 31.295455 | 79 | 0.615482 | false |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/scripts/tests/test_runlaunchpad.py | 1 | 6240 | # Copyright 2009 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for runlaunchpad.py"""
__metaclass__ = type
__all__ = [
'CommandLineArgumentProcessing',
'ServersToStart',
]
import os
import shutil
import tempfile
import testtools
from lp.scripts.runlaunchpad import (
get_services_to_run,
process_config_arguments,
SERVICES,
split_out_runlaunchpad_arguments,
)
import lp.services.config
from lp.services.config import config
import lp.testing
class CommandLineArgumentProcessing(lp.testing.TestCase):
"""runlaunchpad.py's command line arguments fall into two parts. The first
part specifies which services to run; the second part is passed directly
on to the Zope webserver start up.
"""
def test_no_parameter(self):
"""Given no arguments, return no services and no Zope arguments."""
self.assertEqual(([], []), split_out_runlaunchpad_arguments([]))
def test_run_options(self):
"""Services to run are specified with an optional `-r` option.
If a service is specified, it should appear as the first value in the
returned tuple.
"""
self.assertEqual(
(['foo'], []), split_out_runlaunchpad_arguments(['-r', 'foo']))
def test_run_lots_of_things(self):
"""The `-r` option can be used to specify multiple services.
Multiple services are separated with commas. e.g. `-r foo,bar`.
"""
self.assertEqual(
(['foo', 'bar'], []),
split_out_runlaunchpad_arguments(['-r', 'foo,bar']))
def test_run_with_zope_params(self):
"""Any arguments after the initial `-r` option should be passed
straight through to Zope.
"""
self.assertEqual(
(['foo', 'bar'], ['-o', 'foo', '--bar=baz']),
split_out_runlaunchpad_arguments(['-r', 'foo,bar', '-o', 'foo',
'--bar=baz']))
def test_run_with_only_zope_params(self):
"""Pass all the options to zope when the `-r` option is not given."""
self.assertEqual(
([], ['-o', 'foo', '--bar=baz']),
split_out_runlaunchpad_arguments(['-o', 'foo', '--bar=baz']))
class TestDefaultConfigArgument(lp.testing.TestCase):
"""Tests for the processing of the -C argument."""
def setUp(self):
super(TestDefaultConfigArgument, self).setUp()
self.config_root = tempfile.mkdtemp('configs')
self.saved_instance = config.instance_name
self.saved_config_roots = lp.services.config.CONFIG_ROOT_DIRS
lp.services.config.CONFIG_ROOT_DIRS = [self.config_root]
self.addCleanup(self.cleanUp)
def cleanUp(self):
shutil.rmtree(self.config_root)
lp.services.config.CONFIG_ROOT_DIRS = self.saved_config_roots
config.setInstance(self.saved_instance)
def test_keep_argument(self):
"""Make sure that a -C is processed unchanged."""
self.assertEqual(
['-v', '-C', 'a_file.conf', '-h'],
process_config_arguments(['-v', '-C', 'a_file.conf', '-h']))
def test_default_config(self):
"""Make sure that the -C option is set to the correct instance."""
instance_config_dir = os.path.join(self.config_root, 'instance1')
os.mkdir(instance_config_dir)
open(os.path.join(instance_config_dir, 'launchpad.conf'), 'w').close()
config.setInstance('instance1')
self.assertEqual(
['-a_flag', '-C', '%s/launchpad.conf' % instance_config_dir],
process_config_arguments(['-a_flag']))
def test_instance_not_found_raises_ValueError(self):
"""Make sure that an unknown instance fails."""
config.setInstance('unknown')
self.assertRaises(ValueError, process_config_arguments, [])
def test_i_sets_the_instance(self):
"""The -i parameter will set the config instance name."""
instance_config_dir = os.path.join(self.config_root, 'test')
os.mkdir(instance_config_dir)
open(os.path.join(instance_config_dir, 'launchpad.conf'), 'w').close()
self.assertEquals(
['-o', 'foo', '-C', '%s/launchpad.conf' % instance_config_dir],
process_config_arguments(
['-i', 'test', '-o', 'foo']))
self.assertEquals('test', config.instance_name)
class ServersToStart(testtools.TestCase):
"""Test server startup control."""
def setUp(self):
"""Make sure that only the Librarian is configured to launch."""
testtools.TestCase.setUp(self)
launch_data = """
[librarian_server]
launch: True
[codehosting]
launch: False
[launchpad]
launch: False
"""
config.push('launch_data', launch_data)
self.addCleanup(config.pop, 'launch_data')
def test_nothing_explictly_requested(self):
"""Implicitly start services based on the config.*.launch property.
"""
services = sorted(get_services_to_run([]))
expected = [SERVICES['librarian']]
# Mailman may or may not be asked to run.
if config.mailman.launch:
expected.append(SERVICES['mailman'])
# Likewise, the GoogleWebService may or may not be asked to
# run.
if config.google_test_service.launch:
expected.append(SERVICES['google-webservice'])
# RabbitMQ may or may not be asked to run.
if config.rabbitmq.launch:
expected.append(SERVICES['rabbitmq'])
# TxLongPoll may or may not be asked to run.
if config.txlongpoll.launch:
expected.append(SERVICES['txlongpoll'])
expected = sorted(expected)
self.assertEqual(expected, services)
def test_explicit_request_overrides(self):
"""Only start those services which are explictly requested, ignoring
the configuration properties.
"""
services = get_services_to_run(['sftp'])
self.assertEqual([SERVICES['sftp']], services)
def test_launchpad_systems_red(self):
self.failIf(config.launchpad.launch)
| agpl-3.0 | -9,127,931,084,640,641,000 | 34.862069 | 78 | 0.6125 | false |
repotvsupertuga/tvsupertuga.repository | script.module.streamtvsupertuga/lib/resources/lib/sources/it/cinemamega.py | 1 | 3673 | import re
import requests,time
import base64
import xbmc,xbmcaddon
from ..scraper import Scraper
from ..common import clean_search, clean_title,send_log,error_log
dev_log = xbmcaddon.Addon('script.module.universalscrapers').getSetting("dev_log")
from ..modules import cfscrape
class cinemamega(Scraper):
domains = ['cinemamega.net']
name = "CinemaMega"
sources = []
def __init__(self):
self.base_link = 'http://www1.cinemamega.net'
self.scraper = cfscrape.create_scraper()
if dev_log=='true':
self.start_time = time.time()
def scrape_episode(self, title, show_year, year, season, episode, imdb, tvdb, debrid = False):
try:
start_url = self.base_link+'/search-movies/'+title.replace(' ','+')+'+season+'+season+'.html'
html = self.scraper.get(start_url,timeout=3,sleep=10).content
match = re.compile('<div class="ml-item">.+?href="(.+?)".+?onmouseover.+?<i>(.+?)</i>.+?Release: (.+?)<',re.DOTALL).findall(html)
for url,name,release_year in match:
clean_title_,clean_season = re.findall('(.+?): Season (.+?)>',str(name)+'>')[0]
if clean_title(clean_title_)==clean_title(title) and clean_season == season:
html2 = requests.get(url).content
match = re.findall('<a class="episode.+?href="(.+?)">(.+?)</a>',html2)
for url2,episode_ in match:
if episode_ == episode:
self.get_source(url2)
return self.sources
except Exception, argument:
if dev_log == 'true':
error_log(self.name,'Check Search')
return self.sources
def scrape_movie(self, title, year, imdb, debrid = False):
try:
start_url = self.base_link+'/search-movies/'+title.replace(' ','+')+'.html'
html = self.scraper.get(start_url,timeout=3).content
#print html
match = re.compile('<div class="ml-item">.+?href="(.+?)".+?onmouseover.+?<i>(.+?)</i>.+?Release: (.+?)<',re.DOTALL).findall(html)
for url,name,release_year in match:
#print url
if clean_title(name)==clean_title(title) and year == release_year:
print url
self.get_source(url)
return self.sources
except Exception, argument:
if dev_log == 'true':
error_log(self.name,'Check Search')
return self.sources
def get_source(self,link):
try:
count = 0
html = self.scraper.get(link,timeout=3).content
frame = base64.decodestring(re.findall('Base64.decode.+?"(.+?)"',str(html))[0])
playlink = re.findall('src="(.+?)"',str(frame))[0]
source = re.findall('//(.+?)/',str(playlink))[0]
if 'entervideo' in source:
embed_url = playlink
html = requests.get(embed_url).content
m = re.findall('source src="(.+?)"',html)
for vid_url in m:
vid_url = vid_url+'|User-Agent=Mozilla/5.0 (Windows NT 6.3; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0&'+'Referer='+embed_url
self.sources.append({'source': 'Entervideo', 'quality': 'SD', 'scraper': self.name, 'url': vid_url,'direct': True})
count +=1
if dev_log=='true':
end_time = time.time() - self.start_time
send_log(self.name,end_time,count)
except:
pass
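# Hedged usage sketch (assumption: invoked by the universalscrapers framework,
# which instantiates scraper classes directly; the title/IMDB id are illustrative):
#
#     scraper = cinemamega()
#     links = scraper.scrape_movie('Example Movie', '2016', imdb='tt0000000')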
| gpl-2.0 | 6,013,707,584,222,948,000 | 47.328947 | 142 | 0.519466 | false |
danmar/cppcheck | addons/test/test-misra.py | 1 | 5544 | # Running the test with Python 2:
# Be sure to install pytest version 4.6.4 (newer should also work)
# Command in cppcheck directory:
# python -m pytest addons/test/test-misra.py
#
# Running the test with Python 3:
# Command in cppcheck directory:
# PYTHONPATH=./addons python3 -m pytest addons/test/test-misra.py
import pytest
import re
import sys
from .util import dump_create, dump_remove, convert_json_output
TEST_SOURCE_FILES = ['./addons/test/misra/misra-test.c']
def setup_module(module):
for f in TEST_SOURCE_FILES:
dump_create(f)
def teardown_module(module):
for f in TEST_SOURCE_FILES:
dump_remove(f)
@pytest.fixture(scope="function")
def checker():
from addons.misra import MisraChecker, MisraSettings, get_args_parser
parser = get_args_parser()
args = parser.parse_args([])
settings = MisraSettings(args)
return MisraChecker(settings)
def test_loadRuleTexts_structure(checker):
checker.loadRuleTexts("./addons/test/misra/misra_rules_structure.txt")
assert(checker.ruleTexts.get(101, None) is None)
assert(checker.ruleTexts[102].text == "Rule text.")
assert(checker.ruleTexts.get(103, None) is None)
def test_loadRuleTexts_empty_lines(checker):
checker.loadRuleTexts("./addons/test/misra/misra_rules_empty_lines.txt")
assert(len(checker.ruleTexts) == 3)
assert(len(checker.ruleTexts[102].text) == len("Rule text."))
def test_loadRuleTexts_mutiple_lines(checker):
checker.loadRuleTexts("./addons/test/misra/misra_rules_multiple_lines.txt")
assert(checker.ruleTexts[101].text == "Multiple lines text.")
assert(checker.ruleTexts[102].text == "Multiple lines text.")
assert(checker.ruleTexts[103].text == "Multiple lines text.")
assert(checker.ruleTexts[104].text == "Should")
assert(checker.ruleTexts[105].text == "Should")
assert(checker.ruleTexts[106].text == "Can contain empty lines.")
def test_verifyRuleTexts(checker, capsys):
checker.loadRuleTexts("./addons/test/misra/misra_rules_dummy.txt")
checker.verifyRuleTexts()
captured = capsys.readouterr().out
assert("21.3" not in captured)
assert("1.3" in captured)
def test_rules_misra_severity(checker):
checker.loadRuleTexts("./addons/test/misra/misra_rules_dummy.txt")
assert(checker.ruleTexts[1004].misra_severity == 'Mandatory')
assert(checker.ruleTexts[401].misra_severity == 'Required')
assert(checker.ruleTexts[1505].misra_severity == 'Advisory')
assert(checker.ruleTexts[2104].misra_severity == '')
def test_json_out(checker, capsys):
sys.argv.append("--cli")
checker.loadRuleTexts("./addons/test/misra/misra_rules_dummy.txt")
checker.parseDump("./addons/test/misra/misra-test.c.dump")
captured = capsys.readouterr()
captured = captured.out.splitlines()
sys.argv.remove("--cli")
json_output = convert_json_output(captured)
assert("Mandatory" in json_output['c2012-10.4'][0]['extra'])
assert("Required" in json_output['c2012-21.3'][0]['extra'])
assert("Advisory" in json_output['c2012-20.1'][0]['extra'])
def test_rules_cppcheck_severity(checker, capsys):
checker.loadRuleTexts("./addons/test/misra/misra_rules_dummy.txt")
checker.parseDump("./addons/test/misra/misra-test.c.dump")
captured = capsys.readouterr().err
assert("(error)" not in captured)
assert("(warning)" not in captured)
assert("(style)" in captured)
def test_rules_cppcheck_severity_custom(checker, capsys):
checker.loadRuleTexts("./addons/test/misra/misra_rules_dummy.txt")
checker.setSeverity("custom-severity")
checker.parseDump("./addons/test/misra/misra-test.c.dump")
captured = capsys.readouterr().err
assert("(error)" not in captured)
assert("(warning)" not in captured)
assert("(style)" not in captured)
assert("(custom-severity)" in captured)
def test_rules_suppression(checker, capsys):
test_sources = ["addons/test/misra/misra-suppressions1-test.c",
"addons/test/misra/misra-suppressions2-test.c"]
for src in test_sources:
re_suppressed= r"\[%s\:[0-9]+\]" % src
dump_remove(src)
dump_create(src, "--suppressions-list=addons/test/misra/suppressions.txt")
checker.parseDump(src + ".dump")
captured = capsys.readouterr().err
found = re.search(re_suppressed, captured)
        assert found is None, 'Unexpected output:\n' + captured
dump_remove(src)
def test_arguments_regression():
args_ok = ["-generate-table",
"--rule-texts=./addons/test/assets/misra_rules_multiple_lines.txt",
"--verify-rule-texts",
"-t=foo", "--template=foo",
"--suppress-rules=15.1",
"--quiet",
"--cli",
"--no-summary",
"--show-suppressed-rules",
"-P=src/", "--file-prefix=src/",
"--severity=misra-warning"]
# Arguments with expected SystemExit
args_exit = ["--non-exists", "--non-exists-param=42", "-h", "--help"]
from addons.misra import get_args_parser
for arg in args_exit:
sys.argv.append(arg)
with pytest.raises(SystemExit):
parser = get_args_parser()
parser.parse_args()
sys.argv.remove(arg)
for arg in args_ok:
sys.argv.append(arg)
try:
parser = get_args_parser()
parser.parse_args()
except SystemExit:
pytest.fail("Unexpected SystemExit with '%s'" % arg)
sys.argv.remove(arg)
| gpl-3.0 | -8,838,774,535,284,808,000 | 34.767742 | 82 | 0.660714 | false |
ccmbioinfo/mugqic_pipelines | bfx/exonerate.py | 1 | 1803 | #!/usr/bin/env python
################################################################################
# Copyright (C) 2014, 2015 GenAP, McGill University and Genome Quebec Innovation Centre
#
# This file is part of MUGQIC Pipelines.
#
# MUGQIC Pipelines is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MUGQIC Pipelines is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with MUGQIC Pipelines. If not, see <http://www.gnu.org/licenses/>.
################################################################################
# Python Standard Modules
# MUGQIC Modules
from core.config import *
from core.job import *
def fastareformat (input, output):
return Job(
input_files=[input],
output_files=[output],
command="fastareformat " + input + " > " + output,
module_entries=[['DEFAULT' , 'module_exonerate']]
)
def fastasplit (fasta, output_directory, output_basename, num_fasta_chunks):
return Job(
[fasta],
# fastasplit creates FASTA chunk files numbered with 7 digits and padded with leading 0s
[ os.path.join(output_directory, output_basename + "_{:07d}".format(i)) for i in range(num_fasta_chunks) ],
[['exonerate_fastasplit', 'module_exonerate']],
command="fastasplit -f " + fasta + " -o " + output_directory + " -c " + str(num_fasta_chunks)
) | lgpl-3.0 | -6,422,750,426,001,053,000 | 40.953488 | 115 | 0.637826 | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyQt4/QtNetwork/QNetworkCookie.py | 1 | 4052 | # encoding: utf-8
# module PyQt4.QtNetwork
# from /usr/lib/python3/dist-packages/PyQt4/QtNetwork.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
class QNetworkCookie(): # skipped bases: <class 'sip.simplewrapper'>
"""
QNetworkCookie(QByteArray name=QByteArray(), QByteArray value=QByteArray())
QNetworkCookie(QNetworkCookie)
"""
def domain(self): # real signature unknown; restored from __doc__
""" QNetworkCookie.domain() -> str """
return ""
def expirationDate(self): # real signature unknown; restored from __doc__
""" QNetworkCookie.expirationDate() -> QDateTime """
pass
def isHttpOnly(self): # real signature unknown; restored from __doc__
""" QNetworkCookie.isHttpOnly() -> bool """
return False
def isSecure(self): # real signature unknown; restored from __doc__
""" QNetworkCookie.isSecure() -> bool """
return False
def isSessionCookie(self): # real signature unknown; restored from __doc__
""" QNetworkCookie.isSessionCookie() -> bool """
return False
def name(self): # real signature unknown; restored from __doc__
""" QNetworkCookie.name() -> QByteArray """
pass
def parseCookies(self, QByteArray): # real signature unknown; restored from __doc__
""" QNetworkCookie.parseCookies(QByteArray) -> list-of-QNetworkCookie """
pass
def path(self): # real signature unknown; restored from __doc__
""" QNetworkCookie.path() -> str """
return ""
def setDomain(self, p_str): # real signature unknown; restored from __doc__
""" QNetworkCookie.setDomain(str) """
pass
def setExpirationDate(self, QDateTime): # real signature unknown; restored from __doc__
""" QNetworkCookie.setExpirationDate(QDateTime) """
pass
def setHttpOnly(self, bool): # real signature unknown; restored from __doc__
""" QNetworkCookie.setHttpOnly(bool) """
pass
def setName(self, QByteArray): # real signature unknown; restored from __doc__
""" QNetworkCookie.setName(QByteArray) """
pass
def setPath(self, p_str): # real signature unknown; restored from __doc__
""" QNetworkCookie.setPath(str) """
pass
def setSecure(self, bool): # real signature unknown; restored from __doc__
""" QNetworkCookie.setSecure(bool) """
pass
def setValue(self, QByteArray): # real signature unknown; restored from __doc__
""" QNetworkCookie.setValue(QByteArray) """
pass
def toRawForm(self, QNetworkCookie_RawForm_form=None): # real signature unknown; restored from __doc__
""" QNetworkCookie.toRawForm(QNetworkCookie.RawForm form=QNetworkCookie.Full) -> QByteArray """
pass
def value(self): # real signature unknown; restored from __doc__
""" QNetworkCookie.value() -> QByteArray """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
Full = 1
NameAndValueOnly = 0
RawForm = None # (!) real value is ''
__hash__ = None
| gpl-2.0 | 6,562,310,998,037,599,000 | 32.766667 | 106 | 0.615499 | false |
SCECcode/BBP | bbp/comps/plot_srf.py | 1 | 20917 | #!/usr/bin/env python
"""
Copyright 2010-2019 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Plots slip distribution for a SRF
"""
from __future__ import division, print_function
# Import Python modules
import os
import sys
import numpy as np
import matplotlib as mpl
if mpl.get_backend() != 'agg':
mpl.use('Agg') # Disables use of Tk/X11
import matplotlib.colors as mcolors
import matplotlib.cm as cm
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import pylab
# Import Broadband modules
import bband_utils
from install_cfg import InstallCfg
# Import plot config file
import plot_config
# Slip range in cm
SLIP_X_FACTOR = 20.0
SLIP_Y_FACTOR = 5.0
def read_xy_file(input_file, numx, numy):
"""
Read in fault file
"""
my_file = open(input_file)
slips = my_file.readlines()
my_file.close()
data = np.arange(numx * numy, dtype=float).reshape(numy, numx)
# Data is x-fast
for y in xrange(0, numy):
for x in xrange(0, numx):
tokens = slips[y * (numx) + x].split()
data[y][x] = tokens[2]
return data
def get_srf_num_segments(srf_file):
"""
Returns number of segments in a SRF file
"""
srf_segments = None
srf = open(srf_file, 'r')
for line in srf:
if line.startswith("PLANE"):
# Found the plane line, read number of segments
srf_segments = int(line.split()[1])
break
srf.close()
if srf_segments is None:
print("ERROR: Could not read number of segments from "
"SRF file: %s" % (src_file))
sys.exit(1)
# Return number of segments
return srf_segments
def get_srf_params(srf_file, segment=0):
"""
Reads fault_len, width, dlen, dwid, and azimuth from the srf_file
Segment allows users to specify segment of interest (0-based)
"""
srf_params1 = None
srf_params2 = None
srf = open(srf_file, 'r')
for line in srf:
if line.startswith("PLANE"):
# Found the plane line, read number of segments
srf_segments = int(line.split()[1])
if srf_segments < segment + 1:
print("ERROR: Requested parameters from segment %d, "
" SRF file only has %d segment(s)!" %
(segment + 1, srf_segments))
sys.exit(1)
for _ in range(segment):
# Skip lines to get to the segment we want
_ = srf.next()
_ = srf.next()
# The next line should have what we need
srf_params1 = srf.next()
srf_params2 = srf.next()
break
srf.close()
if srf_params1 is None or srf_params2 is None:
print("ERROR: Cannot determine parameters from SRF file %s" %
(srf_file))
sys.exit(1)
srf_params1 = srf_params1.strip()
srf_params1 = srf_params1.split()
srf_params2 = srf_params2.strip()
srf_params2 = srf_params2.split()
# Make sure we have the correct number of pieces
if len(srf_params1) != 6 or len(srf_params2) != 5:
print("ERROR: Cannot parse params from SRF file %s" %
(srf_file))
sys.exit(1)
# Pick the parameters that we need
params = {}
params["dim_len"] = int(srf_params1[2])
params["dim_wid"] = int(srf_params1[3])
params["fault_len"] = float(srf_params1[4])
params["fault_width"] = float(srf_params1[5])
params["azimuth"] = int(float(srf_params2[0]))
return params
def plot_multi_srf_files(plottitle, srffiles, outdir):
"""
Produces the multi-segment SRF plot
"""
num_segments = len(srffiles)
srf_params = []
srf_dims = []
srf_extents = []
srf_slips = []
srf_tinits = []
for srffile in srffiles:
# Get SRF parameters
params = get_srf_params(srffile)
dim_len = params["dim_len"]
dim_wid = params["dim_wid"]
fault_len = params["fault_len"]
fault_width = params["fault_width"]
dims = [dim_len, dim_wid]
extents = [-(fault_len / 2), (fault_len / 2),
fault_width, 0.0]
# Read in SRF slips
slipfile = "%s.slip" % (os.path.splitext(srffile)[0])
slips = read_xy_file(slipfile, dims[0], dims[1])
# Read in SRF tinits
tinitfile = "%s.tinit" % (os.path.splitext(srffile)[0])
tinits = read_xy_file(tinitfile, dims[0], dims[1])
# Find avg/max slip
sumslip = 0.0
minslip = 100000.0
maxslip = 0.0
for y in xrange(0, dims[1]):
for x in xrange(0, dims[0]):
if slips[y][x] > maxslip:
maxslip = slips[y][x]
if slips[y][x] < minslip:
minslip = slips[y][x]
sumslip = sumslip + slips[y][x]
params["minslip"] = minslip
params["maxslip"] = maxslip
params["sumslip"] = sumslip
# Add to our lists
srf_params.append(params)
srf_dims.append(dims)
srf_extents.append(extents)
srf_slips.append(slips)
srf_tinits.append(tinits)
# Calculate min, max, average slip
avgslip = 0.0
minslip = 100000.0
maxslip = 0.0
totalpts = 0.0
for params, dims in zip(srf_params, srf_dims):
avgslip = avgslip + params["sumslip"]
totalpts = totalpts + (dims[0] * dims[1])
minslip = min(minslip, params["minslip"])
maxslip = max(maxslip, params["maxslip"])
avgslip = avgslip / totalpts
# Create subfigures
fig, subfigs = pylab.plt.subplots(1, num_segments, sharey=True)
# Set plot dims
fig.set_size_inches(11, 4.2)
# Set title
fig.suptitle('%s\nMin/Avg/Max Slip = %d/%d/%d' % (plottitle,
int(minslip),
int(avgslip),
int(maxslip)), size=12)
    # Set up proportions, first we calculate what we need
num_spaces = num_segments - 1
between_space = 0.02
total_space = 0.9 # Figure goes from 0.05 to 0.95
usable_space = total_space - between_space * num_spaces
total_len = 0.0
for params in srf_params:
total_len = total_len + params["fault_len"]
ratios = []
for params in srf_params:
ratios.append(params["fault_len"] / total_len)
# Now we apply these to the axes
current_position = 0.05
for subfig, ratio in zip(subfigs, ratios):
current_len = usable_space * ratio
subfig.set_position([current_position, 0.2,
current_len, 0.60])
current_position = current_position + current_len + between_space
# Setup slip color scale
cmap = cm.hot_r
d = int(maxslip / SLIP_X_FACTOR + 0.0)
while SLIP_X_FACTOR * d < 0.9 * maxslip:
d = d + 1
colormin = 0.0
colormax = float(SLIP_X_FACTOR * d)
colorint = float(SLIP_Y_FACTOR * d)
norm = mcolors.Normalize(vmin=colormin, vmax=colormax)
for (subfig, params, dims,
slips, tinits,
extents) in zip(subfigs, srf_params, srf_dims,
srf_slips, srf_tinits, srf_extents):
subfig.set_adjustable('box-forced')
# Plot slips
im = subfig.imshow(slips, cmap=cmap, norm=norm, extent=extents,
interpolation='nearest')
# Freeze the axis extents
subfig.set_autoscale_on(False)
# Set font size
for tick in subfig.get_xticklabels():
tick.set_fontsize(8)
for tick in subfig.get_yticklabels():
tick.set_fontsize(8)
subfig.set_title(u"Azimuth = %d\u00b0" % (params["azimuth"]), size=8)
subfig.set_xlabel("Along Strike (km)", size=8)
if subfig is subfigs[0]:
subfig.set_ylabel("Down Dip (km)", size=8)
# Setup tinit contours
mintinit = 100000.0
maxtinit = 0.0
for y in xrange(0, dims[1]):
for x in xrange(0, dims[0]):
if tinits[y][x] > maxtinit:
maxtinit = tinits[y][x]
if tinits[y][x] < mintinit:
mintinit = tinits[y][x]
contour_intervals = ((maxtinit - mintinit) /
plot_config.PLOT_SRF_DEFAULT_CONTOUR_INTERVALS)
# Plot tinit contours
subfig.contour(tinits,
pylab.linspace(mintinit, maxtinit,
round(contour_intervals)),
origin='upper', extent=extents, colors='k')
# Setup slip color scale
colorbar_ax = fig.add_axes([0.2, 0.1, 0.6, 0.02])
cb = fig.colorbar(im, cax=colorbar_ax, orientation='horizontal',
ticks=pylab.linspace(colormin, colormax,
(colormax/colorint) + 1))
cb.set_label('Slip (cm)', fontsize=8)
for tick in cb.ax.get_xticklabels():
tick.set_fontsize(8)
# Save plot to file
srffile = os.path.splitext(os.path.basename(srffiles[0]))[0]
if srffile.find("_seg") > 0:
srffile = srffile[0:srffile.find("_seg")]
outfile = os.path.join(outdir, "%s.png" % (srffile))
print("Saving plot to %s" % (outfile))
pylab.savefig(outfile, format="png", transparent=False, dpi=plot_config.dpi)
def plot_multi_plot(num_segments, srf_params, srf_dims,
srf_extents, srf_slips, srf_tinits,
plottitle, srffile, outdir):
"""
Create actual plot for multi-segments
"""
# Calculate min, max, average slip
avgslip = 0.0
minslip = 100000.0
maxslip = 0.0
totalpts = 0.0
for params, dims in zip(srf_params, srf_dims):
avgslip = avgslip + params["sumslip"]
totalpts = totalpts + (dims[0] * dims[1])
minslip = min(minslip, params["minslip"])
maxslip = max(maxslip, params["maxslip"])
avgslip = avgslip / totalpts
# Create subfigures
fig, subfigs = pylab.plt.subplots(1, num_segments, sharey=True)
# Make sure it is an array
if num_segments == 1:
subfigs = [subfigs]
# Set plot dims
fig.set_size_inches(11, 4)
# Set title
fig.suptitle('%s\nMin/Avg/Max Slip = %d/%d/%d' % (plottitle,
int(minslip),
int(avgslip),
int(maxslip)), size=12)
    # Set up proportions, first we calculate what we need
num_spaces = num_segments - 1
between_space = 0.02
total_space = 0.8 # Figure goes from 0.1 to 0.9
usable_space = total_space - between_space * num_spaces
total_len = 0.0
for params in srf_params:
total_len = total_len + params["fault_len"]
ratios = []
for params in srf_params:
ratios.append(params["fault_len"] / total_len)
# Now we apply these to the axes
current_position = 0.1
for subfig, ratio in zip(subfigs, ratios):
current_len = usable_space * ratio
subfig.set_position([current_position, 0.2,
current_len, 0.60])
current_position = current_position + current_len + between_space
# Setup slip color scale
cmap = cm.hot_r
d = int(maxslip / SLIP_X_FACTOR + 0.0)
while SLIP_X_FACTOR * d < 0.9 * maxslip:
d = d + 1
colormin = 0.0
colormax = float(SLIP_X_FACTOR * d)
colorint = float(SLIP_Y_FACTOR * d)
norm = mcolors.Normalize(vmin=colormin, vmax=colormax)
for (subfig, params, dims,
slips, tinits,
extents) in zip(subfigs, srf_params, srf_dims,
srf_slips, srf_tinits, srf_extents):
subfig.set_adjustable('box-forced')
# Plot slips
im = subfig.imshow(slips, cmap=cmap, norm=norm, extent=extents,
interpolation='nearest')
# Freeze the axis extents
subfig.set_autoscale_on(False)
# Set font size
for tick in subfig.get_xticklabels():
tick.set_fontsize(8)
for tick in subfig.get_yticklabels():
tick.set_fontsize(8)
subfig.set_title(u"Azimuth = %d\u00b0" % (params["azimuth"]), size=8)
subfig.set_xlabel("Along Strike (km)", size=8)
if subfig is subfigs[0]:
subfig.set_ylabel("Down Dip (km)", size=8)
# Setup tinit contours
mintinit = 100000.0
maxtinit = 0.0
for y in xrange(0, dims[1]):
for x in xrange(0, dims[0]):
if tinits[y][x] > maxtinit:
maxtinit = tinits[y][x]
if tinits[y][x] < mintinit:
mintinit = tinits[y][x]
contour_intervals = ((maxtinit - mintinit) /
plot_config.PLOT_SRF_DEFAULT_CONTOUR_INTERVALS)
if contour_intervals < 10:
contour_intervals = 10
# Plot tinit contours
subfig.contour(tinits,
pylab.linspace(mintinit, maxtinit,
round(contour_intervals)),
origin='upper', extent=extents, colors='k')
# Setup slip color scale
colorbar_ax = fig.add_axes([0.2, 0.1, 0.6, 0.02])
cb = fig.colorbar(im, cax=colorbar_ax, orientation='horizontal',
ticks=pylab.linspace(colormin, colormax,
(colormax/colorint) + 1))
cb.set_label('Slip (cm)', fontsize=8)
for tick in cb.ax.get_xticklabels():
tick.set_fontsize(8)
# Save plot to file
outfile = os.path.join(outdir,
"%s.png" %
(os.path.splitext(srffile)[0]))
print("Saving plot to %s" % (outfile))
pylab.savefig(outfile, format="png",
transparent=False, dpi=plot_config.dpi)
def plot(plottitle, srffile, outdir):
"""
Produce the SRF plot
"""
srf_params = []
srf_dims = []
srf_extents = []
srf_slips = []
srf_tinits = []
# Get number of segments
num_segments = get_srf_num_segments(srffile)
for seg in range(num_segments):
# Get SRF parameters
params = get_srf_params(srffile, seg)
dim_len = params["dim_len"]
dim_wid = params["dim_wid"]
fault_len = params["fault_len"]
fault_width = params["fault_width"]
# Plot dimensions
dims = [dim_len, dim_wid]
extents = [-(fault_len / 2), (fault_len / 2),
fault_width, 0.0]
# Read in SRF slips
slipfile = "%s_seg%d.slip" % (os.path.splitext(srffile)[0],
seg)
slips = read_xy_file(slipfile, dims[0], dims[1])
# Read in SRF tinits
tinitfile = "%s_seg%d.tinit" % (os.path.splitext(srffile)[0],
seg)
tinits = read_xy_file(tinitfile, dims[0], dims[1])
# Find avg/max slip
sumslip = 0.0
minslip = 100000.0
maxslip = 0.0
for y in xrange(0, dims[1]):
for x in xrange(0, dims[0]):
if slips[y][x] > maxslip:
maxslip = slips[y][x]
if slips[y][x] < minslip:
minslip = slips[y][x]
sumslip = sumslip + slips[y][x]
params["minslip"] = minslip
params["maxslip"] = maxslip
params["sumslip"] = sumslip
# Add to our lists
srf_params.append(params)
srf_dims.append(dims)
srf_extents.append(extents)
srf_slips.append(slips)
srf_tinits.append(tinits)
if num_segments > 1:
plot_multi_plot(num_segments, srf_params, srf_dims,
srf_extents, srf_slips, srf_tinits,
plottitle, srffile, outdir)
return
# Simple case for 1 segment only, keep it as before
dims = srf_dims[0]
tinits = srf_tinits[0]
slips = srf_slips[0]
extents = srf_extents[0]
# Calculate min, max, average slip
avgslip = 0.0
minslip = 100000.0
maxslip = 0.0
totalpts = 0.0
for params, dims in zip(srf_params, srf_dims):
avgslip = avgslip + params["sumslip"]
totalpts = totalpts + (dims[0] * dims[1])
minslip = min(minslip, params["minslip"])
maxslip = max(maxslip, params["maxslip"])
avgslip = avgslip / totalpts
# Set plot dims
pylab.gcf().set_size_inches(6, 8)
pylab.gcf().clf()
# Set title and adjust title y-position
t = pylab.title("%s\nAvg/Max Slip = %d/%d" % (plottitle,
int(avgslip),
int(maxslip)), size=12)
t.set_y(1.05)
# Setup slip color scale
cmap = cm.hot_r
d = int(maxslip / SLIP_X_FACTOR + 0.0)
while SLIP_X_FACTOR * d < 0.9 * maxslip:
d = d + 1
colormin = 0.0
colormax = float(SLIP_X_FACTOR * d)
colorint = float(SLIP_Y_FACTOR * d)
norm = mcolors.Normalize(vmin=colormin, vmax=colormax)
# Plot slips
pylab.imshow(slips, cmap=cmap,
norm=norm, extent=extents,
interpolation='nearest')
# Freeze the axis extents
pylab.gca().set_autoscale_on(False)
pylab.xlabel("Along Strike (km)", size=8)
pylab.ylabel("Down Dip (km)", size=8)
# Set font size
for tick in pylab.gca().get_xticklabels():
tick.set_fontsize(8)
for tick in pylab.gca().get_yticklabels():
tick.set_fontsize(8)
# Setup slip color scale
cb = pylab.colorbar(orientation='horizontal', shrink=0.5,
ticks=pylab.linspace(colormin, colormax,
(colormax/colorint) + 1))
cb.set_label('Slip (cm)', fontsize=8)
for tick in cb.ax.get_xticklabels():
tick.set_fontsize(8)
# Setup tinit contours
mintinit = 100000.0
maxtinit = 0.0
for y in xrange(0, dims[1]):
for x in xrange(0, dims[0]):
if tinits[y][x] > maxtinit:
maxtinit = tinits[y][x]
if tinits[y][x] < mintinit:
mintinit = tinits[y][x]
contour_intervals = ((maxtinit - mintinit) /
plot_config.PLOT_SRF_DEFAULT_CONTOUR_INTERVALS)
if contour_intervals < 10:
contour_intervals = 10
# Plot tinit contours
pylab.contour(tinits,
pylab.linspace(mintinit, maxtinit,
round(contour_intervals)),
origin='upper', extent=extents, colors='k')
outfile = os.path.join(outdir,
"%s.png" %
(os.path.splitext(srffile)[0]))
print("Saving plot to %s" % (outfile))
pylab.savefig(outfile, format="png",
transparent=False, dpi=plot_config.dpi)
def run(r_srffile, sim_id=0):
"""
Creates a SRF plot from an SRF file
"""
install = InstallCfg.getInstance()
a_outdir = os.path.join(install.A_OUT_DATA_DIR, str(sim_id))
a_tmpdir = os.path.join(install.A_TMP_DATA_DIR, str(sim_id))
srf2xyz_bin = os.path.join(install.A_GP_BIN_DIR, "srf2xyz")
# Save current directory
old_cwd = os.getcwd()
os.chdir(a_tmpdir)
# Get number of segments
num_segments = get_srf_num_segments(r_srffile)
srfbase = r_srffile[0:r_srffile.find(".srf")]
# Write slip and tinit files for each segment
for seg in range(num_segments):
slipfile = "%s_seg%d.slip" % (srfbase, seg)
cmd = ("%s calc_xy=0 type=slip nseg=%d < %s > %s" %
(srf2xyz_bin, seg, r_srffile, slipfile))
bband_utils.runprog(cmd)
tinitfile = "%s_seg%d.tinit" % (srfbase, seg)
cmd = ("%s calc_xy=0 type=tinit nseg=%d < %s > %s" %
(srf2xyz_bin, seg, r_srffile, tinitfile))
bband_utils.runprog(cmd)
plottitle = 'Rupture Model for %s' % (r_srffile)
plot(plottitle, r_srffile, a_outdir)
os.chdir(old_cwd)
def usage():
"""
Prints program usage to the user
"""
print("usage: %s srf_file <sim_id>" %
(sys.argv[0]))
return
if __name__ == '__main__':
if len(sys.argv) != 3:
usage()
sys.exit(1)
SRF_FILE = sys.argv[1]
SIMID = sys.argv[2]
run(SRF_FILE, SIMID)
| apache-2.0 | -5,708,014,791,711,826,000 | 32.201587 | 80 | 0.553569 | false |
hugobarzano/NoInventory | Selenium/test_catalogo.py | 1 | 2397 | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
class CatalogoTest1(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = "http://noinventory.cloudapp.net"
self.verificationErrors = []
self.accept_next_alert = True
def test_catalogo_test1(self):
driver = self.driver
driver.get(self.base_url + "/")
driver.find_element_by_link_text("Catalogos").click()
driver.find_element_by_xpath("(//button[@type='button'])[2]").click()
driver.find_element_by_id("id_nombre_catalogo").clear()
driver.find_element_by_id("id_nombre_catalogo").send_keys("Catalogo sele")
driver.find_element_by_id("id_descripcion_catalogo").clear()
driver.find_element_by_id("id_descripcion_catalogo").send_keys("Catalogo para realizar pruebas con selenium")
driver.find_element_by_id("id_tag_catalogo").clear()
driver.find_element_by_id("id_tag_catalogo").send_keys("tag del catalogo")
driver.find_element_by_name("submit").click()
driver.find_element_by_id("texto").clear()
driver.find_element_by_id("texto").send_keys("sele")
driver.find_element_by_id("busqueda").click()
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException as e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException as e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | -2,864,360,374,179,841,000 | 38.95 | 117 | 0.652065 | false |
kakunbsc/enigma2.1 | lib/python/Components/config.py | 1 | 46098 | from enigma import getPrevAsciiCode
from Tools.NumericalTextInput import NumericalTextInput
from Tools.Directories import resolveFilename, SCOPE_CONFIG, fileExists
from Components.Harddisk import harddiskmanager
from copy import copy as copy_copy
from os import path as os_path
from time import localtime, strftime
# ConfigElement, the base class of all ConfigElements.
# it stores:
# value the current value, usefully encoded.
# usually a property which retrieves _value,
# and maybe does some reformatting
# _value the value as it's going to be saved in the configfile,
# though still in non-string form.
# this is the object which is actually worked on.
# default the initial value. If _value is equal to default,
# it will not be stored in the config file
# saved_value is a text representation of _value, stored in the config file
#
# and has (at least) the following methods:
# save() stores _value into saved_value,
# (or stores 'None' if it should not be stored)
# load() loads _value from saved_value, or loads
# the default if saved_value is 'None' (default)
# or invalid.
#
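#
# illustrative sketch (comments only, not executed): the typical lifecycle
# of an element, using the hypothetical name "elem" and the ConfigYesNo
# subclass defined further below:
#
#   elem = ConfigYesNo(default = False)  # _value == default, nothing saved yet
#   elem.value = True                    # notifiers fire via changed()
#   elem.save()                          # saved_value becomes "true"
#   elem.value = False
#   elem.cancel()                        # load() restores True from saved_value
#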
class ConfigElement(object):
def __init__(self):
self.saved_value = None
self.last_value = None
self.save_disabled = False
self.__notifiers = None
self.__notifiers_final = None
self.enabled = True
self.callNotifiersOnSaveAndCancel = False
def getNotifiers(self):
if self.__notifiers is None:
self.__notifiers = [ ]
return self.__notifiers
def setNotifiers(self, val):
self.__notifiers = val
notifiers = property(getNotifiers, setNotifiers)
def getNotifiersFinal(self):
if self.__notifiers_final is None:
self.__notifiers_final = [ ]
return self.__notifiers_final
def setNotifiersFinal(self, val):
self.__notifiers_final = val
notifiers_final = property(getNotifiersFinal, setNotifiersFinal)
# you need to override this to do input validation
def setValue(self, value):
self._value = value
self.changed()
def getValue(self):
return self._value
value = property(getValue, setValue)
# you need to override this if self.value is not a string
def fromstring(self, value):
return value
	# you can override this for fancy default handling
def load(self):
sv = self.saved_value
if sv is None:
self.value = self.default
else:
self.value = self.fromstring(sv)
def tostring(self, value):
return str(value)
# you need to override this if str(self.value) doesn't work
def save(self):
if self.save_disabled or self.value == self.default:
self.saved_value = None
else:
self.saved_value = self.tostring(self.value)
if self.callNotifiersOnSaveAndCancel:
self.changed()
def cancel(self):
self.load()
if self.callNotifiersOnSaveAndCancel:
self.changed()
def isChanged(self):
sv = self.saved_value
if sv is None and self.value == self.default:
return False
return self.tostring(self.value) != sv
def changed(self):
if self.__notifiers:
for x in self.notifiers:
x(self)
def changedFinal(self):
if self.__notifiers_final:
for x in self.notifiers_final:
x(self)
def addNotifier(self, notifier, initial_call = True, immediate_feedback = True):
assert callable(notifier), "notifiers must be callable"
if immediate_feedback:
self.notifiers.append(notifier)
else:
self.notifiers_final.append(notifier)
# CHECKME:
# do we want to call the notifier
# - at all when adding it? (yes, though optional)
# - when the default is active? (yes)
# - when no value *yet* has been set,
# because no config has ever been read (currently yes)
# (though that's not so easy to detect.
# the entry could just be new.)
if initial_call:
notifier(self)
def disableSave(self):
self.save_disabled = True
def __call__(self, selected):
return self.getMulti(selected)
def onSelect(self, session):
pass
def onDeselect(self, session):
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value
KEY_LEFT = 0
KEY_RIGHT = 1
KEY_OK = 2
KEY_DELETE = 3
KEY_BACKSPACE = 4
KEY_HOME = 5
KEY_END = 6
KEY_TOGGLEOW = 7
KEY_ASCII = 8
KEY_TIMEOUT = 9
KEY_NUMBERS = range(12, 12+10)
KEY_0 = 12
KEY_9 = 12+9
def getKeyNumber(key):
assert key in KEY_NUMBERS
return key - KEY_0
class choicesList(object): # XXX: we might want a better name for this
LIST_TYPE_LIST = 1
LIST_TYPE_DICT = 2
def __init__(self, choices, type = None):
self.choices = choices
if type is None:
if isinstance(choices, list):
self.type = choicesList.LIST_TYPE_LIST
elif isinstance(choices, dict):
self.type = choicesList.LIST_TYPE_DICT
else:
assert False, "choices must be dict or list!"
else:
self.type = type
def __list__(self):
if self.type == choicesList.LIST_TYPE_LIST:
ret = [not isinstance(x, tuple) and x or x[0] for x in self.choices]
else:
ret = self.choices.keys()
return ret or [""]
def __iter__(self):
if self.type == choicesList.LIST_TYPE_LIST:
ret = [not isinstance(x, tuple) and x or x[0] for x in self.choices]
else:
ret = self.choices
return iter(ret or [""])
def __len__(self):
return len(self.choices) or 1
def __getitem__(self, index):
if self.type == choicesList.LIST_TYPE_LIST:
ret = self.choices[index]
if isinstance(ret, tuple):
ret = ret[0]
return ret
return self.choices.keys()[index]
def index(self, value):
return self.__list__().index(value)
def __setitem__(self, index, value):
if self.type == choicesList.LIST_TYPE_LIST:
orig = self.choices[index]
if isinstance(orig, tuple):
self.choices[index] = (value, orig[1])
else:
self.choices[index] = value
else:
key = self.choices.keys()[index]
orig = self.choices[key]
del self.choices[key]
self.choices[value] = orig
def default(self):
choices = self.choices
if not choices:
return ""
if self.type is choicesList.LIST_TYPE_LIST:
default = choices[0]
if isinstance(default, tuple):
default = default[0]
else:
default = choices.keys()[0]
return default
class descriptionList(choicesList): # XXX: we might want a better name for this
def __list__(self):
if self.type == choicesList.LIST_TYPE_LIST:
ret = [not isinstance(x, tuple) and x or x[1] for x in self.choices]
else:
ret = self.choices.values()
return ret or [""]
def __iter__(self):
return iter(self.__list__())
def __getitem__(self, index):
if self.type == choicesList.LIST_TYPE_LIST:
for x in self.choices:
if isinstance(x, tuple):
if x[0] == index:
return str(x[1])
elif x == index:
return str(x)
return str(index) # Fallback!
else:
return str(self.choices.get(index, ""))
def __setitem__(self, index, value):
if self.type == choicesList.LIST_TYPE_LIST:
i = self.index(index)
orig = self.choices[i]
if isinstance(orig, tuple):
self.choices[i] = (orig[0], value)
else:
self.choices[i] = value
else:
self.choices[index] = value
#
# ConfigSelection is a "one of.."-type.
# it has the "choices", usually a list, which contains
# (id, desc)-tuples (or just the ids, in case the id
# will be used as description)
#
# all ids MUST be plain strings.
#
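# illustrative usage (comments only): choices may be plain ids or
# (id, description) tuples; the "config.osd.language" name below is an
# assumption and relies on the usual ConfigSubsection tree built elsewhere:
#
#   config.osd.language = ConfigSelection(
#       choices = [("en_EN", _("English")), ("de_DE", _("German"))],
#       default = "en_EN")
#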
class ConfigSelection(ConfigElement):
def __init__(self, choices, default = None):
ConfigElement.__init__(self)
self.choices = choicesList(choices)
if default is None:
default = self.choices.default()
self._descr = None
self.default = self._value = self.last_value = default
def setChoices(self, choices, default = None):
self.choices = choicesList(choices)
if default is None:
default = self.choices.default()
self.default = default
if self.value not in self.choices:
self.value = default
def setValue(self, value):
if value in self.choices:
self._value = value
else:
self._value = self.default
self._descr = None
self.changed()
def tostring(self, val):
return val
def getValue(self):
return self._value
def setCurrentText(self, text):
i = self.choices.index(self.value)
self.choices[i] = text
self._descr = self.description[text] = text
self._value = text
value = property(getValue, setValue)
def getIndex(self):
return self.choices.index(self.value)
index = property(getIndex)
# GUI
def handleKey(self, key):
nchoices = len(self.choices)
i = self.choices.index(self.value)
if key == KEY_LEFT:
self.value = self.choices[(i + nchoices - 1) % nchoices]
elif key == KEY_RIGHT:
self.value = self.choices[(i + 1) % nchoices]
elif key == KEY_HOME:
self.value = self.choices[0]
elif key == KEY_END:
self.value = self.choices[nchoices - 1]
def selectNext(self):
nchoices = len(self.choices)
i = self.choices.index(self.value)
self.value = self.choices[(i + 1) % nchoices]
def getText(self):
if self._descr is not None:
return self._descr
descr = self._descr = self.description[self.value]
if descr:
return _(descr)
return descr
def getMulti(self, selected):
if self._descr is not None:
descr = self._descr
else:
descr = self._descr = self.description[self.value]
if descr:
return ("text", _(descr))
return ("text", descr)
# HTML
def getHTML(self, id):
res = ""
for v in self.choices:
descr = self.description[v]
if self.value == v:
checked = 'checked="checked" '
else:
checked = ''
res += '<input type="radio" name="' + id + '" ' + checked + 'value="' + v + '">' + descr + "</input></br>\n"
		return res
def unsafeAssign(self, value):
# setValue does check if value is in choices. This is safe enough.
self.value = value
description = property(lambda self: descriptionList(self.choices.choices, self.choices.type))
# a binary decision.
#
# several customized versions exist for different
# descriptions.
#
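# illustrative usage (comments only): the subclasses below only differ in
# how the two states are shown; the setting names are assumptions:
#
#   config.usage.show_infobar = ConfigYesNo(default = True)    # "yes"/"no"
#   config.av.downmix_ac3 = ConfigOnOff(default = False)       # "on"/"off"
#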
boolean_descriptions = {False: "false", True: "true"}
class ConfigBoolean(ConfigElement):
def __init__(self, default = False, descriptions = boolean_descriptions):
ConfigElement.__init__(self)
self.descriptions = descriptions
self.value = self.last_value = self.default = default
def handleKey(self, key):
if key in (KEY_LEFT, KEY_RIGHT):
self.value = not self.value
elif key == KEY_HOME:
self.value = False
elif key == KEY_END:
self.value = True
def getText(self):
descr = self.descriptions[self.value]
if descr:
return _(descr)
return descr
def getMulti(self, selected):
descr = self.descriptions[self.value]
if descr:
return ("text", _(descr))
return ("text", descr)
def tostring(self, value):
if not value:
return "false"
else:
return "true"
def fromstring(self, val):
if val == "true":
return True
else:
return False
def getHTML(self, id):
if self.value:
checked = ' checked="checked"'
else:
checked = ''
return '<input type="checkbox" name="' + id + '" value="1" ' + checked + " />"
# this is FLAWED. and must be fixed.
def unsafeAssign(self, value):
if value == "1":
self.value = True
else:
self.value = False
def onDeselect(self, session):
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value
yes_no_descriptions = {False: _("no"), True: _("yes")}
class ConfigYesNo(ConfigBoolean):
def __init__(self, default = False):
ConfigBoolean.__init__(self, default = default, descriptions = yes_no_descriptions)
on_off_descriptions = {False: _("off"), True: _("on")}
class ConfigOnOff(ConfigBoolean):
def __init__(self, default = False):
ConfigBoolean.__init__(self, default = default, descriptions = on_off_descriptions)
enable_disable_descriptions = {False: _("disable"), True: _("enable")}
class ConfigEnableDisable(ConfigBoolean):
def __init__(self, default = False):
ConfigBoolean.__init__(self, default = default, descriptions = enable_disable_descriptions)
class ConfigDateTime(ConfigElement):
def __init__(self, default, formatstring, increment = 86400):
ConfigElement.__init__(self)
self.increment = increment
self.formatstring = formatstring
self.value = self.last_value = self.default = int(default)
def handleKey(self, key):
if key == KEY_LEFT:
self.value = self.value - self.increment
elif key == KEY_RIGHT:
self.value = self.value + self.increment
elif key == KEY_HOME or key == KEY_END:
self.value = self.default
def getText(self):
return strftime(self.formatstring, localtime(self.value))
def getMulti(self, selected):
return ("text", strftime(self.formatstring, localtime(self.value)))
def fromstring(self, val):
return int(val)
# *THE* mighty config element class
#
# allows you to store/edit a sequence of values.
# can be used for IP-addresses, dates, plain integers, ...
# several helpers exist to ease this up a bit.
#
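# illustrative usage (comments only): a sequence is described by its
# seperator plus one (min, max) limit per block; the example below is an
# assumption, not an element used by enigma2 itself:
#
#   example = ConfigSequence(seperator = ":", limits = [(0, 65535), (0, 999)],
#                            default = [8001, 30])
#
# the helpers below (ConfigIP, ConfigMAC, ConfigClock, ConfigInteger, ...)
# merely preset seperator and limits.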
class ConfigSequence(ConfigElement):
def __init__(self, seperator, limits, default, censor_char = ""):
ConfigElement.__init__(self)
assert isinstance(limits, list) and len(limits[0]) == 2, "limits must be [(min, max),...]-tuple-list"
assert censor_char == "" or len(censor_char) == 1, "censor char must be a single char (or \"\")"
#assert isinstance(default, list), "default must be a list"
#assert isinstance(default[0], int), "list must contain numbers"
#assert len(default) == len(limits), "length must match"
self.marked_pos = 0
self.seperator = seperator
self.limits = limits
self.censor_char = censor_char
self.last_value = self.default = default
self.value = copy_copy(default)
self.endNotifier = None
def validate(self):
max_pos = 0
num = 0
for i in self._value:
max_pos += len(str(self.limits[num][1]))
if self._value[num] < self.limits[num][0]:
self._value[num] = self.limits[num][0]
if self._value[num] > self.limits[num][1]:
self._value[num] = self.limits[num][1]
num += 1
if self.marked_pos >= max_pos:
if self.endNotifier:
for x in self.endNotifier:
x(self)
self.marked_pos = max_pos - 1
if self.marked_pos < 0:
self.marked_pos = 0
def validatePos(self):
if self.marked_pos < 0:
self.marked_pos = 0
total_len = sum([len(str(x[1])) for x in self.limits])
if self.marked_pos >= total_len:
self.marked_pos = total_len - 1
def addEndNotifier(self, notifier):
if self.endNotifier is None:
self.endNotifier = []
self.endNotifier.append(notifier)
def handleKey(self, key):
if key == KEY_LEFT:
self.marked_pos -= 1
self.validatePos()
elif key == KEY_RIGHT:
self.marked_pos += 1
self.validatePos()
elif key == KEY_HOME:
self.marked_pos = 0
self.validatePos()
elif key == KEY_END:
max_pos = 0
num = 0
for i in self._value:
max_pos += len(str(self.limits[num][1]))
num += 1
self.marked_pos = max_pos - 1
self.validatePos()
elif key in KEY_NUMBERS or key == KEY_ASCII:
if key == KEY_ASCII:
code = getPrevAsciiCode()
if code < 48 or code > 57:
return
number = code - 48
else:
number = getKeyNumber(key)
block_len = [len(str(x[1])) for x in self.limits]
total_len = sum(block_len)
pos = 0
blocknumber = 0
block_len_total = [0, ]
for x in block_len:
pos += block_len[blocknumber]
block_len_total.append(pos)
if pos - 1 >= self.marked_pos:
pass
else:
blocknumber += 1
# length of numberblock
number_len = len(str(self.limits[blocknumber][1]))
# position in the block
posinblock = self.marked_pos - block_len_total[blocknumber]
oldvalue = self._value[blocknumber]
olddec = oldvalue % 10 ** (number_len - posinblock) - (oldvalue % 10 ** (number_len - posinblock - 1))
newvalue = oldvalue - olddec + (10 ** (number_len - posinblock - 1) * number)
self._value[blocknumber] = newvalue
self.marked_pos += 1
self.validate()
self.changed()
def genText(self):
value = ""
mPos = self.marked_pos
		num = 0
for i in self._value:
			if value: #fixme no leading separator possible
value += self.seperator
if mPos >= len(value) - 1:
mPos += 1
if self.censor_char == "":
value += ("%0" + str(len(str(self.limits[num][1]))) + "d") % i
else:
value += (self.censor_char * len(str(self.limits[num][1])))
num += 1
return (value, mPos)
def getText(self):
(value, mPos) = self.genText()
return value
def getMulti(self, selected):
(value, mPos) = self.genText()
# only mark cursor when we are selected
# (this code is heavily ink optimized!)
if self.enabled:
return ("mtext"[1-selected:], value, [mPos])
else:
return ("text", value)
def tostring(self, val):
return self.seperator.join([self.saveSingle(x) for x in val])
def saveSingle(self, v):
return str(v)
def fromstring(self, value):
return [int(x) for x in value.split(self.seperator)]
def onDeselect(self, session):
if self.last_value != self._value:
self.changedFinal()
self.last_value = copy_copy(self._value)
ip_limits = [(0,255),(0,255),(0,255),(0,255)]
class ConfigIP(ConfigSequence):
def __init__(self, default, auto_jump = False):
ConfigSequence.__init__(self, seperator = ".", limits = ip_limits, default = default)
self.block_len = [len(str(x[1])) for x in self.limits]
self.marked_block = 0
self.overwrite = True
self.auto_jump = auto_jump
def handleKey(self, key):
if key == KEY_LEFT:
if self.marked_block > 0:
self.marked_block -= 1
self.overwrite = True
elif key == KEY_RIGHT:
if self.marked_block < len(self.limits)-1:
self.marked_block += 1
self.overwrite = True
elif key == KEY_HOME:
self.marked_block = 0
self.overwrite = True
elif key == KEY_END:
self.marked_block = len(self.limits)-1
self.overwrite = True
elif key in KEY_NUMBERS or key == KEY_ASCII:
if key == KEY_ASCII:
code = getPrevAsciiCode()
if code < 48 or code > 57:
return
number = code - 48
else:
number = getKeyNumber(key)
oldvalue = self._value[self.marked_block]
if self.overwrite:
self._value[self.marked_block] = number
self.overwrite = False
else:
oldvalue *= 10
newvalue = oldvalue + number
if self.auto_jump and newvalue > self.limits[self.marked_block][1] and self.marked_block < len(self.limits)-1:
self.handleKey(KEY_RIGHT)
self.handleKey(key)
return
else:
self._value[self.marked_block] = newvalue
if len(str(self._value[self.marked_block])) >= self.block_len[self.marked_block]:
self.handleKey(KEY_RIGHT)
self.validate()
self.changed()
def genText(self):
value = ""
block_strlen = []
for i in self._value:
block_strlen.append(len(str(i)))
if value:
value += self.seperator
value += str(i)
leftPos = sum(block_strlen[:(self.marked_block)])+self.marked_block
rightPos = sum(block_strlen[:(self.marked_block+1)])+self.marked_block
mBlock = range(leftPos, rightPos)
return (value, mBlock)
def getMulti(self, selected):
(value, mBlock) = self.genText()
if self.enabled:
return ("mtext"[1-selected:], value, mBlock)
else:
return ("text", value)
def getHTML(self, id):
# we definitely don't want leading zeros
return '.'.join(["%d" % d for d in self.value])
mac_limits = [(1,255),(1,255),(1,255),(1,255),(1,255),(1,255)]
class ConfigMAC(ConfigSequence):
def __init__(self, default):
ConfigSequence.__init__(self, seperator = ":", limits = mac_limits, default = default)
class ConfigPosition(ConfigSequence):
def __init__(self, default, args):
ConfigSequence.__init__(self, seperator = ",", limits = [(0,args[0]),(0,args[1]),(0,args[2]),(0,args[3])], default = default)
clock_limits = [(0,23),(0,59)]
class ConfigClock(ConfigSequence):
def __init__(self, default):
t = localtime(default)
ConfigSequence.__init__(self, seperator = ":", limits = clock_limits, default = [t.tm_hour, t.tm_min])
def increment(self):
# Check if Minutes maxed out
if self._value[1] == 59:
# Increment Hour, reset Minutes
if self._value[0] < 23:
self._value[0] += 1
else:
self._value[0] = 0
self._value[1] = 0
else:
# Increment Minutes
self._value[1] += 1
# Trigger change
self.changed()
def decrement(self):
# Check if Minutes is minimum
if self._value[1] == 0:
# Decrement Hour, set Minutes to 59
if self._value[0] > 0:
self._value[0] -= 1
else:
self._value[0] = 23
self._value[1] = 59
else:
# Decrement Minutes
self._value[1] -= 1
# Trigger change
self.changed()
integer_limits = (0, 9999999999)
class ConfigInteger(ConfigSequence):
def __init__(self, default, limits = integer_limits):
ConfigSequence.__init__(self, seperator = ":", limits = [limits], default = default)
# you need to override this to do input validation
def setValue(self, value):
self._value = [value]
self.changed()
def getValue(self):
return self._value[0]
value = property(getValue, setValue)
def fromstring(self, value):
return int(value)
def tostring(self, value):
return str(value)
class ConfigPIN(ConfigInteger):
def __init__(self, default, len = 4, censor = ""):
assert isinstance(default, int), "ConfigPIN default must be an integer"
if default == -1:
default = "aaaa"
ConfigSequence.__init__(self, seperator = ":", limits = [(0, (10**len)-1)], censor_char = censor, default = default)
self.len = len
def getLength(self):
return self.len
class ConfigFloat(ConfigSequence):
def __init__(self, default, limits):
ConfigSequence.__init__(self, seperator = ".", limits = limits, default = default)
def getFloat(self):
return float(self.value[1] / float(self.limits[1][1] + 1) + self.value[0])
float = property(getFloat)
# an editable text...
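# illustrative usage (comments only), the setting name is an assumption:
#
#   config.network.hostname = ConfigText(default = "enigma2",
#                                        fixed_size = False,
#                                        visible_width = 20)
#
# fixed_size = False lets the string grow and shrink, visible_width makes
# the GUI scroll instead of showing the whole string.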
class ConfigText(ConfigElement, NumericalTextInput):
def __init__(self, default = "", fixed_size = True, visible_width = False):
ConfigElement.__init__(self)
NumericalTextInput.__init__(self, nextFunc = self.nextFunc, handleTimeout = False)
self.marked_pos = 0
self.allmarked = (default != "")
self.fixed_size = fixed_size
self.visible_width = visible_width
self.offset = 0
self.overwrite = fixed_size
self.help_window = None
self.value = self.last_value = self.default = default
def validateMarker(self):
textlen = len(self.text)
if self.fixed_size:
if self.marked_pos > textlen-1:
self.marked_pos = textlen-1
else:
if self.marked_pos > textlen:
self.marked_pos = textlen
if self.marked_pos < 0:
self.marked_pos = 0
if self.visible_width:
if self.marked_pos < self.offset:
self.offset = self.marked_pos
if self.marked_pos >= self.offset + self.visible_width:
if self.marked_pos == textlen:
self.offset = self.marked_pos - self.visible_width
else:
self.offset = self.marked_pos - self.visible_width + 1
if self.offset > 0 and self.offset + self.visible_width > textlen:
				self.offset = max(0, textlen - self.visible_width)
def insertChar(self, ch, pos, owr):
if owr or self.overwrite:
self.text = self.text[0:pos] + ch + self.text[pos + 1:]
elif self.fixed_size:
self.text = self.text[0:pos] + ch + self.text[pos:-1]
else:
self.text = self.text[0:pos] + ch + self.text[pos:]
def deleteChar(self, pos):
if not self.fixed_size:
self.text = self.text[0:pos] + self.text[pos + 1:]
elif self.overwrite:
self.text = self.text[0:pos] + " " + self.text[pos + 1:]
else:
self.text = self.text[0:pos] + self.text[pos + 1:] + " "
def deleteAllChars(self):
if self.fixed_size:
self.text = " " * len(self.text)
else:
self.text = ""
self.marked_pos = 0
def handleKey(self, key):
		# this will not change anything on the value itself
# so we can handle it here in gui element
if key == KEY_DELETE:
self.timeout()
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
else:
self.deleteChar(self.marked_pos)
if self.fixed_size and self.overwrite:
self.marked_pos += 1
elif key == KEY_BACKSPACE:
self.timeout()
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
elif self.marked_pos > 0:
self.deleteChar(self.marked_pos-1)
if not self.fixed_size and self.offset > 0:
self.offset -= 1
self.marked_pos -= 1
elif key == KEY_LEFT:
self.timeout()
if self.allmarked:
self.marked_pos = len(self.text)
self.allmarked = False
else:
self.marked_pos -= 1
elif key == KEY_RIGHT:
self.timeout()
if self.allmarked:
self.marked_pos = 0
self.allmarked = False
else:
self.marked_pos += 1
elif key == KEY_HOME:
self.timeout()
self.allmarked = False
self.marked_pos = 0
elif key == KEY_END:
self.timeout()
self.allmarked = False
self.marked_pos = len(self.text)
elif key == KEY_TOGGLEOW:
self.timeout()
self.overwrite = not self.overwrite
elif key == KEY_ASCII:
self.timeout()
newChar = unichr(getPrevAsciiCode())
if not self.useableChars or newChar in self.useableChars:
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
self.insertChar(newChar, self.marked_pos, False)
self.marked_pos += 1
elif key in KEY_NUMBERS:
owr = self.lastKey == getKeyNumber(key)
newChar = self.getKey(getKeyNumber(key))
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
self.insertChar(newChar, self.marked_pos, owr)
elif key == KEY_TIMEOUT:
self.timeout()
if self.help_window:
self.help_window.update(self)
return
if self.help_window:
self.help_window.update(self)
self.validateMarker()
self.changed()
def nextFunc(self):
self.marked_pos += 1
self.validateMarker()
self.changed()
def getValue(self):
return self.text.encode("utf-8")
def setValue(self, val):
try:
self.text = val.decode("utf-8")
except UnicodeDecodeError:
self.text = val.decode("utf-8", "ignore")
print "Broken UTF8!"
value = property(getValue, setValue)
_value = property(getValue, setValue)
def getText(self):
return self.text.encode("utf-8")
def getMulti(self, selected):
if self.visible_width:
if self.allmarked:
mark = range(0, min(self.visible_width, len(self.text)))
else:
mark = [self.marked_pos-self.offset]
return ("mtext"[1-selected:], self.text[self.offset:self.offset+self.visible_width].encode("utf-8")+" ", mark)
else:
if self.allmarked:
mark = range(0, len(self.text))
else:
mark = [self.marked_pos]
return ("mtext"[1-selected:], self.text.encode("utf-8")+" ", mark)
def onSelect(self, session):
self.allmarked = (self.value != "")
if session is not None:
from Screens.NumericalTextInputHelpDialog import NumericalTextInputHelpDialog
self.help_window = session.instantiateDialog(NumericalTextInputHelpDialog, self)
self.help_window.show()
def onDeselect(self, session):
self.marked_pos = 0
self.offset = 0
if self.help_window:
session.deleteDialog(self.help_window)
self.help_window = None
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value
def getHTML(self, id):
return '<input type="text" name="' + id + '" value="' + self.value + '" /><br>\n'
def unsafeAssign(self, value):
self.value = str(value)
class ConfigPassword(ConfigText):
def __init__(self, default = "", fixed_size = False, visible_width = False, censor = "*"):
ConfigText.__init__(self, default = default, fixed_size = fixed_size, visible_width = visible_width)
self.censor_char = censor
self.hidden = True
def getMulti(self, selected):
mtext, text, mark = ConfigText.getMulti(self, selected)
if self.hidden:
text = len(text) * self.censor_char
return (mtext, text, mark)
def onSelect(self, session):
ConfigText.onSelect(self, session)
self.hidden = False
def onDeselect(self, session):
ConfigText.onDeselect(self, session)
self.hidden = True
# lets the user select between [min, min+stepwidth, min+(stepwidth*2)..., maxval] with maxval <= max depending
# on the stepwidth
# min, max, stepwidth, default are int values
# wraparound: pressing RIGHT key at max value brings you to min value and vice versa if set to True
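# illustrative usage (comments only), the setting name is an assumption:
#
#   config.audio.volume_steps = ConfigSelectionNumber(0, 100, 5,
#                                                     default = 50,
#                                                     wraparound = True)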
class ConfigSelectionNumber(ConfigSelection):
def __init__(self, min, max, stepwidth, default = None, wraparound = False):
self.wraparound = wraparound
if default is None:
default = min
default = str(default)
choices = []
step = min
while step <= max:
choices.append(str(step))
step += stepwidth
ConfigSelection.__init__(self, choices, default)
def getValue(self):
return int(ConfigSelection.getValue(self))
def setValue(self, val):
ConfigSelection.setValue(self, str(val))
def handleKey(self, key):
if not self.wraparound:
if key == KEY_RIGHT:
if len(self.choices) == (self.choices.index(self.value) + 1):
return
if key == KEY_LEFT:
if self.choices.index(self.value) == 0:
return
ConfigSelection.handleKey(self, key)
class ConfigNumber(ConfigText):
def __init__(self, default = 0):
ConfigText.__init__(self, str(default), fixed_size = False)
def getValue(self):
return int(self.text)
def setValue(self, val):
self.text = str(val)
value = property(getValue, setValue)
_value = property(getValue, setValue)
def isChanged(self):
sv = self.saved_value
strv = self.tostring(self.value)
if sv is None and strv == self.default:
return False
return strv != sv
def conform(self):
pos = len(self.text) - self.marked_pos
self.text = self.text.lstrip("0")
if self.text == "":
self.text = "0"
if pos > len(self.text):
self.marked_pos = 0
else:
self.marked_pos = len(self.text) - pos
def handleKey(self, key):
if key in KEY_NUMBERS or key == KEY_ASCII:
if key == KEY_ASCII:
ascii = getPrevAsciiCode()
if not (48 <= ascii <= 57):
return
else:
ascii = getKeyNumber(key) + 48
newChar = unichr(ascii)
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
self.insertChar(newChar, self.marked_pos, False)
self.marked_pos += 1
else:
ConfigText.handleKey(self, key)
self.conform()
def onSelect(self, session):
self.allmarked = (self.value != "")
def onDeselect(self, session):
self.marked_pos = 0
self.offset = 0
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value
class ConfigSearchText(ConfigText):
def __init__(self, default = "", fixed_size = False, visible_width = False):
ConfigText.__init__(self, default = default, fixed_size = fixed_size, visible_width = visible_width)
NumericalTextInput.__init__(self, nextFunc = self.nextFunc, handleTimeout = False, search = True)
class ConfigDirectory(ConfigText):
def __init__(self, default="", visible_width=60):
ConfigText.__init__(self, default, fixed_size = True, visible_width = visible_width)
def handleKey(self, key):
pass
def getValue(self):
if self.text == "":
return None
else:
return ConfigText.getValue(self)
def setValue(self, val):
		if val is None:
val = ""
ConfigText.setValue(self, val)
def getMulti(self, selected):
if self.text == "":
return ("mtext"[1-selected:], _("List of Storage Devices"), range(0))
else:
return ConfigText.getMulti(self, selected)
def onSelect(self, session):
self.allmarked = (self.value != "")
# a slider.
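# illustrative usage (comments only), the setting name is an assumption:
#
#   config.lcd.brightness = ConfigSlider(default = 5, increment = 1,
#                                        limits = (0, 10))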
class ConfigSlider(ConfigElement):
def __init__(self, default = 0, increment = 1, limits = (0, 100)):
ConfigElement.__init__(self)
self.value = self.last_value = self.default = default
self.min = limits[0]
self.max = limits[1]
self.increment = increment
def checkValues(self):
if self.value < self.min:
self.value = self.min
if self.value > self.max:
self.value = self.max
def handleKey(self, key):
if key == KEY_LEFT:
self.value -= self.increment
elif key == KEY_RIGHT:
self.value += self.increment
elif key == KEY_HOME:
self.value = self.min
elif key == KEY_END:
self.value = self.max
else:
return
self.checkValues()
def getText(self):
return "%d / %d" % (self.value, self.max)
def getMulti(self, selected):
self.checkValues()
return ("slider", self.value, self.max)
def fromstring(self, value):
return int(value)
# a satlist. in fact, it's a ConfigSelection.
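# illustrative usage (comments only): entries are (orbital_position,
# description, flags) tuples, typically taken from the NimManager satList;
# the values below are assumptions:
#
#   example = ConfigSatlist(list = [(192, "Astra 19.2E", 0),
#                                   (130, "Hotbird 13.0E", 0)])
#   example.orbital_position  # -> 192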
class ConfigSatlist(ConfigSelection):
def __init__(self, list, default = None):
if default is not None:
default = str(default)
ConfigSelection.__init__(self, choices = [(str(orbpos), desc) for (orbpos, desc, flags) in list], default = default)
def getOrbitalPosition(self):
if self.value == "":
return None
return int(self.value)
orbital_position = property(getOrbitalPosition)
class ConfigSet(ConfigElement):
def __init__(self, choices, default = []):
ConfigElement.__init__(self)
if isinstance(choices, list):
choices.sort()
self.choices = choicesList(choices, choicesList.LIST_TYPE_LIST)
else:
assert False, "ConfigSet choices must be a list!"
if default is None:
default = []
self.pos = -1
default.sort()
self.last_value = self.default = default
self.value = default[:]
def toggleChoice(self, choice):
value = self.value
if choice in value:
value.remove(choice)
else:
value.append(choice)
value.sort()
self.changed()
def handleKey(self, key):
if key in KEY_NUMBERS + [KEY_DELETE, KEY_BACKSPACE]:
if self.pos != -1:
self.toggleChoice(self.choices[self.pos])
elif key == KEY_LEFT:
if self.pos < 0:
self.pos = len(self.choices)-1
else:
self.pos -= 1
elif key == KEY_RIGHT:
if self.pos >= len(self.choices)-1:
self.pos = -1
else:
self.pos += 1
elif key in (KEY_HOME, KEY_END):
self.pos = -1
def genString(self, lst):
res = ""
for x in lst:
res += self.description[x]+" "
return res
def getText(self):
return self.genString(self.value)
def getMulti(self, selected):
if not selected or self.pos == -1:
return ("text", self.genString(self.value))
else:
tmp = self.value[:]
ch = self.choices[self.pos]
mem = ch in self.value
if not mem:
tmp.append(ch)
tmp.sort()
ind = tmp.index(ch)
val1 = self.genString(tmp[:ind])
val2 = " "+self.genString(tmp[ind+1:])
if mem:
chstr = " "+self.description[ch]+" "
else:
chstr = "("+self.description[ch]+")"
len_val1 = len(val1)
return ("mtext", val1+chstr+val2, range(len_val1, len_val1 + len(chstr)))
def onDeselect(self, session):
self.pos = -1
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value[:]
def tostring(self, value):
return str(value)
def fromstring(self, val):
return eval(val)
description = property(lambda self: descriptionList(self.choices.choices, choicesList.LIST_TYPE_LIST))
class ConfigLocations(ConfigElement):
def __init__(self, default = [], visible_width = False):
ConfigElement.__init__(self)
self.visible_width = visible_width
self.pos = -1
self.default = default
self.locations = []
self.mountpoints = []
self.value = default[:]
def setValue(self, value):
locations = self.locations
loc = [x[0] for x in locations if x[3]]
add = [x for x in value if not x in loc]
diff = add + [x for x in loc if not x in value]
locations = [x for x in locations if not x[0] in diff] + [[x, self.getMountpoint(x), True, True] for x in add]
locations.sort(key = lambda x: x[0])
self.locations = locations
self.changed()
def getValue(self):
self.checkChangedMountpoints()
locations = self.locations
for x in locations:
x[3] = x[2]
return [x[0] for x in locations if x[3]]
value = property(getValue, setValue)
def tostring(self, value):
return str(value)
def fromstring(self, val):
return eval(val)
def load(self):
sv = self.saved_value
if sv is None:
tmp = self.default
else:
tmp = self.fromstring(sv)
locations = [[x, None, False, False] for x in tmp]
self.refreshMountpoints()
for x in locations:
if fileExists(x[0]):
x[1] = self.getMountpoint(x[0])
x[2] = True
self.locations = locations
def save(self):
locations = self.locations
if self.save_disabled or not locations:
self.saved_value = None
else:
self.saved_value = self.tostring([x[0] for x in locations])
def isChanged(self):
sv = self.saved_value
locations = self.locations
		if sv is None and not locations:
return False
return self.tostring([x[0] for x in locations]) != sv
def addedMount(self, mp):
for x in self.locations:
if x[1] == mp:
x[2] = True
elif x[1] == None and fileExists(x[0]):
x[1] = self.getMountpoint(x[0])
x[2] = True
def removedMount(self, mp):
for x in self.locations:
if x[1] == mp:
x[2] = False
def refreshMountpoints(self):
self.mountpoints = [p.mountpoint for p in harddiskmanager.getMountedPartitions() if p.mountpoint != "/"]
self.mountpoints.sort(key = lambda x: -len(x))
def checkChangedMountpoints(self):
oldmounts = self.mountpoints
self.refreshMountpoints()
newmounts = self.mountpoints
if oldmounts == newmounts:
return
for x in oldmounts:
if not x in newmounts:
self.removedMount(x)
for x in newmounts:
if not x in oldmounts:
self.addedMount(x)
def getMountpoint(self, file):
file = os_path.realpath(file)+"/"
for m in self.mountpoints:
if file.startswith(m):
return m
return None
def handleKey(self, key):
if key == KEY_LEFT:
self.pos -= 1
if self.pos < -1:
self.pos = len(self.value)-1
elif key == KEY_RIGHT:
self.pos += 1
if self.pos >= len(self.value):
self.pos = -1
elif key in (KEY_HOME, KEY_END):
self.pos = -1
def getText(self):
return " ".join(self.value)
def getMulti(self, selected):
if not selected:
valstr = " ".join(self.value)
if self.visible_width and len(valstr) > self.visible_width:
return ("text", valstr[0:self.visible_width])
else:
return ("text", valstr)
else:
i = 0
valstr = ""
ind1 = 0
ind2 = 0
for val in self.value:
if i == self.pos:
ind1 = len(valstr)
valstr += str(val)+" "
if i == self.pos:
ind2 = len(valstr)
i += 1
if self.visible_width and len(valstr) > self.visible_width:
if ind1+1 < self.visible_width/2:
off = 0
else:
off = min(ind1+1-self.visible_width/2, len(valstr)-self.visible_width)
return ("mtext", valstr[off:off+self.visible_width], range(ind1-off,ind2-off))
else:
return ("mtext", valstr, range(ind1,ind2))
def onDeselect(self, session):
self.pos = -1
# nothing.
class ConfigNothing(ConfigSelection):
def __init__(self):
ConfigSelection.__init__(self, choices = [("","")])
# until here, 'saved_value' always had to be a *string*.
# now, in ConfigSubsection, and only there, saved_value
# is a dict, essentially forming a tree.
#
# config.foo.bar=True
# config.foobar=False
#
# turns into:
# config.saved_value == {"foo": {"bar": "True"}, "foobar": "False"}
#
class ConfigSubsectionContent(object):
pass
# we store a backup of the loaded configuration
# data in self.stored_values, to be able to deploy
# them when a new config element is added,
# so non-default values are instantly available
# A list, for example:
# config.dipswitches = ConfigSubList()
# config.dipswitches.append(ConfigYesNo())
# config.dipswitches.append(ConfigYesNo())
# config.dipswitches.append(ConfigYesNo())
class ConfigSubList(list, object):
def __init__(self):
list.__init__(self)
self.stored_values = {}
def save(self):
for x in self:
x.save()
def load(self):
for x in self:
x.load()
def getSavedValue(self):
res = { }
for i, val in enumerate(self):
sv = val.saved_value
if sv is not None:
res[str(i)] = sv
return res
def setSavedValue(self, values):
self.stored_values = dict(values)
for (key, val) in self.stored_values.items():
if int(key) < len(self):
self[int(key)].saved_value = val
saved_value = property(getSavedValue, setSavedValue)
def append(self, item):
i = str(len(self))
list.append(self, item)
if i in self.stored_values:
item.saved_value = self.stored_values[i]
item.load()
def dict(self):
return dict([(str(index), value) for index, value in enumerate(self)])
# same as ConfigSubList, just as a dictionary.
# care must be taken that the 'key' has a proper
# str() method, because it will be used in the config
# file.
class ConfigSubDict(dict, object):
def __init__(self):
dict.__init__(self)
self.stored_values = {}
def save(self):
for x in self.values():
x.save()
def load(self):
for x in self.values():
x.load()
def getSavedValue(self):
res = {}
for (key, val) in self.items():
sv = val.saved_value
if sv is not None:
res[str(key)] = sv
return res
def setSavedValue(self, values):
self.stored_values = dict(values)
for (key, val) in self.items():
if str(key) in self.stored_values:
val.saved_value = self.stored_values[str(key)]
saved_value = property(getSavedValue, setSavedValue)
def __setitem__(self, key, item):
dict.__setitem__(self, key, item)
if str(key) in self.stored_values:
item.saved_value = self.stored_values[str(key)]
item.load()
def dict(self):
return self
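# Illustrative sketch, not part of the original file: a ConfigSubDict keyed by
# slot number. Only str(key) matters, because that string is what
# getSavedValue()/setSavedValue() use and what ends up in the config file.
# The "slots" name is made up for the example.
#
#config.slots = ConfigSubDict()
#config.slots[0] = ConfigYesNo()
#config.slots[1] = ConfigYesNo()
#config.slots[1].value = True
#config.slots.save()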
# Like the classes above, just with a more "native"
# syntax.
#
# some evil stuff must be done to allow instant
# loading of added elements. this is why this class
# is so complex.
#
# we need the 'content' because we overwrite
# __setattr__.
# If you don't understand this, try adding
# __setattr__ to an ordinary existing class and you will.
class ConfigSubsection(object):
def __init__(self):
self.__dict__["content"] = ConfigSubsectionContent()
self.content.items = { }
self.content.stored_values = { }
def __setattr__(self, name, value):
if name == "saved_value":
return self.setSavedValue(value)
assert isinstance(value, (ConfigSubsection, ConfigElement, ConfigSubList, ConfigSubDict)), "ConfigSubsections can only store ConfigSubsections, ConfigSubLists, ConfigSubDicts or ConfigElements"
content = self.content
content.items[name] = value
x = content.stored_values.get(name, None)
if x is not None:
#print "ok, now we have a new item,", name, "and have the following value for it:", x
value.saved_value = x
value.load()
def __getattr__(self, name):
return self.content.items[name]
def getSavedValue(self):
res = self.content.stored_values
for (key, val) in self.content.items.items():
sv = val.saved_value
if sv is not None:
res[key] = sv
elif key in res:
del res[key]
return res
def setSavedValue(self, values):
values = dict(values)
self.content.stored_values = values
for (key, val) in self.content.items.items():
value = values.get(key, None)
if value is not None:
val.saved_value = value
saved_value = property(getSavedValue, setSavedValue)
def save(self):
for x in self.content.items.values():
x.save()
def load(self):
for x in self.content.items.values():
x.load()
def dict(self):
return self.content.items
# the root config object, which also can "pickle" (=serialize)
# down the whole config tree.
#
# we try to keep non-existing config entries, to apply them whenever
# a new config entry is added to a subsection
# also, non-existing config entries will be saved, so they won't be
# lost when a config entry disappears.
class Config(ConfigSubsection):
def __init__(self):
ConfigSubsection.__init__(self)
def pickle_this(self, prefix, topickle, result):
for (key, val) in topickle.items():
name = '.'.join((prefix, key))
if isinstance(val, dict):
self.pickle_this(name, val, result)
elif isinstance(val, tuple):
result += [name, '=', val[0], '\n']
else:
result += [name, '=', val, '\n']
def pickle(self):
result = []
self.pickle_this("config", self.saved_value, result)
return ''.join(result)
def unpickle(self, lines):
tree = { }
for l in lines:
if not l or l[0] == '#':
continue
n = l.find('=')
val = l[n+1:].strip()
names = l[:n].split('.')
# if val.find(' ') != -1:
# val = val[:val.find(' ')]
base = tree
for n in names[:-1]:
base = base.setdefault(n, {})
base[names[-1]] = val
# we inherit from ConfigSubsection, so ...
#object.__setattr__(self, "saved_value", tree["config"])
if "config" in tree:
self.setSavedValue(tree["config"])
def saveToFile(self, filename):
text = self.pickle()
f = open(filename, "w")
f.write(text)
f.close()
def loadFromFile(self, filename):
f = open(filename, "r")
self.unpickle(f.readlines())
f.close()
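# Illustrative sketch, not part of the original file: with the tree from the
# comment above ConfigSubsectionContent, pickle() emits one dotted
# "name=value" line per saved entry, and unpickle()/loadFromFile() parse
# exactly that format back into the nested saved_value dict:
#
# config.foo.bar=True
# config.foobar=False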
config = Config()
config.misc = ConfigSubsection()
class ConfigFile:
CONFIG_FILE = resolveFilename(SCOPE_CONFIG, "settings")
def load(self):
try:
config.loadFromFile(self.CONFIG_FILE)
except IOError, e:
print "unable to load config (%s), assuming defaults..." % str(e)
def save(self):
# config.save()
config.saveToFile(self.CONFIG_FILE)
def __resolveValue(self, pickles, cmap):
key = pickles[0]
if cmap.has_key(key):
if len(pickles) > 1:
return self.__resolveValue(pickles[1:], cmap[key].dict())
else:
return str(cmap[key].value)
return None
def getResolvedKey(self, key):
names = key.split('.')
if len(names) > 1:
if names[0] == "config":
ret=self.__resolveValue(names[1:], config.content.items)
if ret and len(ret):
return ret
print "getResolvedKey", key, "failed !! (Typo??)"
return ""
def NoSave(element):
element.disableSave()
return element
configfile = ConfigFile()
configfile.load()
def getConfigListEntry(*args):
assert len(args) > 1, "getConfigListEntry needs a minimum of two arguments (descr, configElement)"
return args
def updateConfigElement(element, newelement):
newelement.value = element.value
return newelement
#def _(x):
# return x
#
#config.bla = ConfigSubsection()
#config.bla.test = ConfigYesNo()
#config.nim = ConfigSubList()
#config.nim.append(ConfigSubsection())
#config.nim[0].bla = ConfigYesNo()
#config.nim.append(ConfigSubsection())
#config.nim[1].bla = ConfigYesNo()
#config.nim[1].blub = ConfigYesNo()
#config.arg = ConfigSubDict()
#config.arg["Hello"] = ConfigYesNo()
#
#config.arg["Hello"].handleKey(KEY_RIGHT)
#config.arg["Hello"].handleKey(KEY_RIGHT)
#
##config.saved_value
#
##configfile.save()
#config.save()
#print config.pickle()
| gpl-2.0 | -5,019,229,601,695,854,000 | 25.584775 | 195 | 0.664454 | false |
rero/reroils-app | rero_ils/modules/loans/api.py | 1 | 6190 | # -*- coding: utf-8 -*-
#
# This file is part of RERO ILS.
# Copyright (C) 2017 RERO.
#
# RERO ILS is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# RERO ILS is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RERO ILS; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, RERO does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""API for manipulating Loans."""
from flask import current_app, url_for
from invenio_circulation.errors import CirculationException
from invenio_circulation.pidstore.fetchers import loan_pid_fetcher
from invenio_circulation.pidstore.minters import loan_pid_minter
from invenio_circulation.pidstore.providers import CirculationLoanIdProvider
from invenio_circulation.proxies import current_circulation
from invenio_circulation.search.api import search_by_patron_item
from invenio_jsonschemas import current_jsonschemas
from ..api import IlsRecord
from ..locations.api import Location
from ..patrons.api import Patron
class LoanAction(object):
"""Class holding all availabe circulation loan actions."""
REQUEST = 'request'
CHECKOUT = 'checkout'
CHECKIN = 'checkin'
VALIDATE = 'validate'
RECEIVE = 'receive'
RETURN_MISSING = 'return_missing'
EXTEND = 'extend'
CANCEL = 'cancel'
LOSE = 'lose'
NO = 'no'
class Loan(IlsRecord):
"""Loan class."""
minter = loan_pid_minter
fetcher = loan_pid_fetcher
provider = CirculationLoanIdProvider
pid_field = "loan_pid"
_schema = "loans/loan-ils-v0.0.1.json"
def __init__(self, data, model=None):
"""."""
self["state"] = current_app.config["CIRCULATION_LOAN_INITIAL_STATE"]
super(Loan, self).__init__(data, model)
@classmethod
def create(cls, data, id_=None, delete_pid=True,
dbcommit=False, reindex=False, **kwargs):
"""Create a new ils record."""
data["$schema"] = current_jsonschemas.path_to_url(cls._schema)
if delete_pid and data.get(cls.pid_field):
del(data[cls.pid_field])
record = super(Loan, cls).create(
data=data, id_=id_, delete_pid=delete_pid, dbcommit=dbcommit,
reindex=reindex, **kwargs)
return record
def dumps_for_circulation(self):
"""."""
loan = self.replace_refs()
data = loan.dumps()
patron = Patron.get_record_by_pid(loan['patron_pid'])
ptrn_data = patron.dumps()
data['patron'] = {}
data['patron']['barcode'] = ptrn_data['barcode']
data['patron']['name'] = ', '.join((
ptrn_data['first_name'], ptrn_data['last_name']))
if loan.get('pickup_location_pid'):
location = Location.get_record_by_pid(loan['pickup_location_pid'])
loc_data = location.dumps()
data['pickup_location'] = {}
data['pickup_location']['name'] = loc_data['name']
return data
def build_url_action_for_pid(self, action):
"""Build urls for Loan actions."""
mapping = {
'checkout': 'loan_item',
'validate': 'validate_item_request',
'receive': 'receive_item',
'checkin': 'return_item',
'request': 'request_item',
'extend': 'extend_loan',
'cancel': 'cancel',
}
item_pid_value = self.get('item_pid', '')
location = self.get('pickup_location_pid', '')
if action != 'request':
url = url_for('items.' + mapping[action])
else:
if self['state'] == 'CREATED':
# TODO: find a cleaner way to do this.
# request is the only action that requires two parameters
action = 'cancel'
url = url_for('items.' + mapping[action]).replace(
'cancel', 'request'
)
else:
url = url_for(
'items.' + mapping[action],
item_pid_value=item_pid_value,
location=location,
)
return url
def loan_links_factory(self):
"""Factory for links generation."""
links = {}
actions = {}
transitions_config = current_app.config.get(
'CIRCULATION_LOAN_TRANSITIONS', {}
)
for transition in transitions_config.get(self['state']):
action = transition.get('trigger', 'next')
actions[action] = self.build_url_action_for_pid(action)
links.setdefault('actions', actions)
return links
def get_request_by_item_pid_by_patron_pid(item_pid, patron_pid):
"""Get pending, item_on_transit, item_at_desk loans for item, patron."""
search = search_by_patron_item(
item_pid=item_pid,
patron_pid=patron_pid,
filter_states=[
'PENDING',
'ITEM_AT_DESK',
'ITEM_IN_TRANSIT_FOR_PICKUP',
'ITEM_IN_TRANSIT_TO_HOUSE',
],
)
search_result = search.execute()
if search_result.hits:
return search_result.hits.hits[0]['_source']
return {}
def get_loans_by_patron_pid(patron_pid):
"""Return all checkout for patron."""
if not patron_pid:
raise CirculationException('Patron PID not specified')
results = current_circulation.loan_search\
.source(['loan_pid'])\
.params(preserve_order=True)\
.filter('term', patron_pid=patron_pid)\
.sort({'transaction_date': {'order': 'asc'}})\
.scan()
for loan in results:
yield Loan.get_record_by_pid(loan.loan_pid)
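# Illustrative sketch, not part of the original module: iterating a patron's
# current checkouts with the helper above. The patron pid '1' is a made-up
# example value.
#
# for loan in get_loans_by_patron_pid('1'):
#     print(loan['state'], loan.get('item_pid'))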
| gpl-2.0 | -3,418,947,965,592,303,000 | 33.971751 | 78 | 0.609208 | false |
google/nsscache | nss_cache/maps/shadow.py | 1 | 2400 | # Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""An implementation of a shadow map for nsscache.
ShadowMap: An implementation of NSS shadow maps based on the Map
class.
ShadowMapEntry: A shadow map entry based on the MapEntry class.
"""
__author__ = '[email protected] (Vasilios Hoffman)'
from nss_cache.maps import maps
class ShadowMap(maps.Map):
"""This class represents an NSS shadow map.
Map data is stored as a list of MapEntry objects, see the abstract
class Map.
"""
def __init__(self, iterable=None):
"""Construct a ShadowMap object using optional iterable."""
super(ShadowMap, self).__init__(iterable)
def Add(self, entry):
"""Add a new object, verify it is a ShadowMapEntry object."""
if not isinstance(entry, ShadowMapEntry):
raise TypeError
return super(ShadowMap, self).Add(entry)
class ShadowMapEntry(maps.MapEntry):
"""This class represents NSS shadow map entries."""
__slots__ = ('name', 'passwd', 'lstchg', 'min', 'max', 'warn', 'inact',
'expire', 'flag')
_KEY = 'name'
_ATTRS = ('name', 'passwd', 'lstchg', 'min', 'max', 'warn', 'inact',
'expire', 'flag')
def __init__(self, data=None):
"""Construct a ShadowMapEntry, setting reasonable defaults."""
self.name = None
self.passwd = None
self.lstchg = None
self.min = None
self.max = None
self.warn = None
self.inact = None
self.expire = None
self.flag = None
super(ShadowMapEntry, self).__init__(data)
# Seed data with defaults if needed
if self.passwd is None:
self.passwd = '!!'
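# Illustrative sketch, not part of the original module: building an entry and
# adding it to a map. The account name is a made-up example value.
#
# entry = ShadowMapEntry()
# entry.name = 'jaq'
# shadow_map = ShadowMap()
# shadow_map.Add(entry)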
| gpl-2.0 | 9,021,171,384,939,344,000 | 32.802817 | 75 | 0.652917 | false |
mr3bn/DAT210x | Module2/assignment4.py | 1 | 2346 | import pandas as pd
# TODO: Load up the table, and extract the dataset
# out of it. If you're having issues with this, look
# carefully at the sample code provided in the reading
df = pd.read_html('http://espn.go.com/nhl/statistics/player/_/stat/points/sort/points/year/2015/seasontype/2', header=1)[0]
# TODO: Rename the columns so that they are similar to the
# column definitions provided to you on the website.
# Be careful and don't accidentially use any names twice.
#
n = ['Rank', 'Player', 'Team', 'GamesPlayed', 'Goals', 'Assists', 'Points', 'PlusMinus', 'PenaltyMinutes', 'PointsPerGame', 'ShotsOnGoal', 'ShootingPct', 'GameWinners', 'PowerPlayGoals', 'PowerPlayAssists', 'ShortHandGoals', 'ShortHandAssists']
df.columns = n
# TODO: Get rid of any row that has at least 4 NANs in it,
# e.g. that do not contain player points statistics
df = df[df.isnull().sum(axis=1) < 4]
# TODO: At this point, look through your dataset by printing
# it. There probably still are some erroneous rows in there.
# What indexing command(s) can you use to select all rows
# EXCEPT those rows?
#
for i in range(0, len(df.columns)):
    c = df.iloc[:, i]
    # leave non-numeric columns (e.g. Player, Team) as they are
    if pd.isnull(pd.to_numeric(c.iloc[0], errors='coerce')):
        continue
    else:
        c = pd.to_numeric(c, errors='coerce')
        df.iloc[:, i] = c
df = df[df.isnull().sum(axis=1) != 15]
# TODO: Get rid of the 'RK' column
del df['Rank']
# TODO: Ensure there are no holes in your index by resetting
# it. By the way, don't store the original index
df = df.reset_index(drop=True)
# TODO: Check the data type of all columns, and ensure those
# that should be numeric are numeric
#
for i in range(0, len(df.columns)):
# print the column's name and datatype of its elements
# each column should be atomic at this point,
# so we're just checking for proper type
c = df.iloc[:, i]
typeString = c.dtype.str
print df.columns[i] + ': ' + typeString
# TODO: Your dataframe is now ready! Use the appropriate
# commands to answer the questions on the course lab page.
#
# how many rows remain in the dataset?
print len(df)
# how many unique PCT values exist in the table?
print len(df.loc[:, 'ShootingPct'].unique())
# what's the value of adding games played at indeces 15 and 16?
# ...actually 14 and 15.....
print df.loc[14, 'GamesPlayed'] + df.loc[15, 'GamesPlayed']
| mit | 8,383,620,943,496,560,000 | 31.583333 | 244 | 0.689685 | false |